+++ /dev/null
-[run]
-include =
- pygments/*
+++ /dev/null
-name: 'Update Pyodide package'
-description: 'Update the WASM compiled Pygments with Pyodide'
-runs:
- using: 'docker'
- image: 'birkenfeld/pyodide-pygments-builder'
+++ /dev/null
-name: Pygments
-
-on: [push, pull_request]
-
-jobs:
- build:
- runs-on: ${{ matrix.os }}
- strategy:
- matrix:
- os: [ubuntu-latest, windows-latest]
- python-version: [3.5, 3.6, 3.7, 3.8, pypy3]
- max-parallel: 4
-
- steps:
- - uses: actions/checkout@v2
- - name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v2
- with:
- python-version: ${{ matrix.python-version }}
- - name: Install package
- run: |
- python -m pip install --upgrade pip
- pip install -r requirements.txt
- pip install .
- - name: Test package
- run: make test TEST=-v
- if: runner.os == 'Linux'
- - name: Test package
- run: py.test
- if: runner.os == 'Windows' && ! contains(matrix['python-version'], 'pypy')
-
- lint:
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v2
- - uses: actions/setup-python@v2
- with:
- python-version: 3.8
- - name: Check out regexlint
- run: git clone https://github.com/pygments/regexlint
- - name: Run regexlint
- run: make regexlint REGEXLINT=`pwd`/regexlint
+++ /dev/null
-name: Docs
-
-on:
- push:
- branches:
- - master
-
-jobs:
- build:
- runs-on: ubuntu-latest
- steps:
- - name: Setup Python
- uses: actions/setup-python@v1
- with:
- python-version: 3.7
- - name: Checkout Pygments
- uses: actions/checkout@v1
- - name: Install Sphinx
- run: pip install Sphinx
- - name: Create Pyodide WASM package
- uses: ./.github/actions/pyodide-package
- - name: Sphinx build
- run: |
- cd doc
- WEBSITE_BUILD=1 make dirhtml
- cp -a ../pyodide _build/dirhtml/_static
- touch _build/dirhtml/.nojekyll
- echo -e 'pygments.org\nwww.pygments.org' > _build/dirhtml/CNAME
- echo 'Automated deployment of docs for GitHub pages.' > _build/dirhtml/README
- - name: Deploy to repo
- uses: peaceiris/actions-gh-pages@v2.5.0
- env:
- ACTIONS_DEPLOY_KEY: ${{ secrets.ACTIONS_DEPLOY_KEY }}
- EXTERNAL_REPOSITORY: pygments/pygments.github.io
- PUBLISH_BRANCH: master
- PUBLISH_DIR: ./doc/_build/dirhtml
+++ /dev/null
-*.egg
-*.pyc
-*.pyo
-.*.sw[op]
-/.pytest_cache/
-/.idea/
-/.project
-/.tags
-/.tox/
-/.cache/
-/Pygments.egg-info/*
-/TAGS
-/build/*
-/dist/*
-/doc/_build
-/.coverage
-/htmlcov
-/tests/examplefiles/output
-/.vscode
\ No newline at end of file
* Justin Hendrick -- ParaSail lexer
* Jordi Gutiérrez Hermoso -- Octave lexer
* David Hess, Fish Software, Inc. -- Objective-J lexer
+* Ken Hilton -- Typographic Number Theory and Arrow lexers
* Varun Hiremath -- Debian control lexer
* Rob Hoelz -- Perl 6 lexer
* Doug Hogan -- Mscgen lexer
<https://github.com/pygments/pygments/pulls>.
+Version 2.8.0
+-------------
+(not released yet)
+
+
+Version 2.7.2
+-------------
+(released October 24, 2020)
+
+- Updated lexers:
+
+ * Latex (PR#1517, #1516)
+ * LLVM (PR#1565)
+ * SPARQL (PR#1559)
+
+- Fixed Python console/traceback lexer problems with
+ custom exceptions without messages (#1548)
+- Allow loading ttc fonts on Mac/image formatter (#1223)
+- Improve ``analyze_text`` across a variety of lexers (PR#1549)
+- Removed CSS rule which forced the vertical padding to 0 for line numbers (PR#1583, #1579)
+- Fix ``TNTLexer`` crashing on unexpected EOL (#1568, PR#1570)
+- ``regexlint`` can be now run locally as part of ``tox`` tests (PR#1557)
+- Fix typos (PR#1550, #1562)
+- Add Python 3.9 as a supported version (PR#1554)
+
+
Version 2.7.1
-------------
(released September 16, 2020)
+++ /dev/null
-Licensing
-=========
-
-The code is distributed under the BSD 2-clause license. Contributors making pull
-requests must agree that they are able and willing to put their contributions
-under that license.
-
-Contribution checklist
-======================
-
-* Check the documentation for how to write
- [a new lexer](https://pygments.org/docs/lexerdevelopment/),
- [a new formatter](https://pygments.org/docs/formatterdevelopment/) or
- [a new filter](https://pygments.org/docs/filterdevelopment/)
-* When writing rules, try to merge simple rules. For instance, combine:
-
- ```python
- _PUNCTUATION = [
- (r"\(", token.Punctuation),
- (r"\)", token.Punctuation),
- (r"\[", token.Punctuation),
- (r"\]", token.Punctuation),
- ("{", token.Punctuation),
- ("}", token.Punctuation),
- ]
- ```
-
- into:
-
- ```python
- (r"[\(\)\[\]{}]", token.Punctuation)
- ```
-* Be careful with ``.*``. This matches greedily as much as it can. For instance,
- rule like ``@.*@`` will match the whole string ``@first@ second @third@``,
- instead of matching ``@first@`` and ``@second@``. You can use ``@.*?@`` in
- this case to stop early. The ``?`` tries to match _as few times_ as possible.
-* Don't add imports of your lexer anywhere in the codebase. (In case you're
- curious about ``compiled.py`` -- this file exists for backwards compatibility
- reasons.)
-* Use the standard importing convention: ``from token import Punctuation``
-* If you have a tricky case, you can use the ``testcase`` formatter to produce
- an unit test quickly. Run
- ``python -m pygments -l lua -f testcase <<< "local a = 5"``. This will
- produce a test case function skeleton.
\ No newline at end of file
all: clean-pyc check test
check:
+ @$(PYTHON) scripts/check_crlf.py pygments build external
@$(PYTHON) scripts/detect_missing_analyse_text.py || true
@pyflakes pygments | grep -v 'but unused' || true
@$(PYTHON) scripts/check_sources.py -i build -i dist -i pygments/lexers/_mapping.py \
-i docs/build -i pygments/formatters/_mapping.py -i pygments/unistring.py
clean: clean-pyc
- -rm -rf build tests/examplefiles/output
+ -rm -rf doc/_build build Pygments.egg-info tests/examplefiles/output
-rm -f codetags.html
clean-pyc:
- find . -name '*.pyc' -exec rm -f {} +
- find . -name '*.pyo' -exec rm -f {} +
- find . -name '*~' -exec rm -f {} +
+ find . -name '__pycache__' -exec rm -rf {} +
codetags:
@$(PYTHON) scripts/find_codetags.py -i tests/examplefiles -i scripts/pylintrc \
Metadata-Version: 1.2
Name: Pygments
-Version: 2.7.1
+Version: 2.7.2
Summary: Pygments is a syntax highlighting package written in Python.
Home-page: https://pygments.org/
Author: Georg Brandl
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Programming Language :: Python :: Implementation :: PyPy
Classifier: Operating System :: OS Independent
Metadata-Version: 1.2
Name: Pygments
-Version: 2.7.1
+Version: 2.7.2
Summary: Pygments is a syntax highlighting package written in Python.
Home-page: https://pygments.org/
Author: Georg Brandl
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Programming Language :: Python :: Implementation :: PyPy
Classifier: Operating System :: OS Independent
-.coveragerc
-.gitignore
AUTHORS
CHANGES
-Contributing.md
LICENSE
MANIFEST.in
Makefile
README.rst
-pytest.ini
-requirements.txt
setup.cfg
setup.py
-tox.ini
-.github/actions/pyodide-package/action.yml
-.github/workflows/build.yaml
-.github/workflows/docs.yaml
Pygments.egg-info/PKG-INFO
Pygments.egg-info/SOURCES.txt
Pygments.egg-info/dependency_links.txt
pygments/styles/vim.py
pygments/styles/vs.py
pygments/styles/xcode.py
+scripts/check_crlf.py
scripts/check_sources.py
scripts/debug_lexer.py
scripts/detect_missing_analyse_text.py
scripts/epydoc.css
-scripts/find_error.py
scripts/get_vimkw.py
scripts/pylintrc
scripts/release-checklist
scripts/vim2pygments.py
tests/__init__.py
+tests/test_analyze_lexer.py
tests/test_apache_conf.py
tests/test_asm.py
tests/test_basic.py
tests/test_sql.py
tests/test_terminal_formatter.py
tests/test_textfmts.py
+tests/test_tnt.py
tests/test_token.py
tests/test_unistring.py
tests/test_usd.py
import sys
from io import StringIO, BytesIO
-__version__ = '2.7.1'
+__version__ = '2.7.2'
__docformat__ = 'restructuredtext'
__all__ = ['lex', 'format', 'highlight']
@property
def _linenos_style(self):
- return 'color: %s; background-color: %s; padding: 0 5px 0 5px;' % (
+ return 'color: %s; background-color: %s; padding-left: 5px; padding-right: 5px;' % (
self.style.line_number_color,
self.style.line_number_background_color
)
@property
def _linenos_special_style(self):
- return 'color: %s; background-color: %s; padding: 0 5px 0 5px;' % (
+ return 'color: %s; background-color: %s; padding-left: 5px; padding-right: 5px;' % (
self.style.line_number_special_color,
self.style.line_number_special_background_color
)
'/Library/Fonts/', '/System/Library/Fonts/'):
font_map.update(
(os.path.splitext(f)[0].lower(), os.path.join(font_dir, f))
- for f in os.listdir(font_dir) if f.lower().endswith('ttf'))
+ for f in os.listdir(font_dir)
+ if f.lower().endswith(('ttf', 'ttc')))
for name in STYLES['NORMAL']:
path = self._get_mac_font_path(font_map, self.font_name, name)
from io import StringIO
from pygments.formatter import Formatter
-from pygments.lexer import Lexer
+from pygments.lexer import Lexer, do_insertions
from pygments.token import Token, STANDARD_TYPES
from pygments.util import get_bool_opt, get_int_opt
Lexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
+ # find and remove all the escape tokens (replace with an empty string)
+ # this is very similar to DelegatingLexer.get_tokens_unprocessed.
+ buffered = ''
+ insertions = []
+ insertion_buf = []
+ for i, t, v in self._find_safe_escape_tokens(text):
+ if t is None:
+ if insertion_buf:
+ insertions.append((len(buffered), insertion_buf))
+ insertion_buf = []
+ buffered += v
+ else:
+ insertion_buf.append((i, t, v))
+ if insertion_buf:
+ insertions.append((len(buffered), insertion_buf))
+ return do_insertions(insertions,
+ self.lang.get_tokens_unprocessed(buffered))
+
+ def _find_safe_escape_tokens(self, text):
+ """ find escape tokens that are not in strings or comments """
+ for i, t, v in self._filter_to(
+ self.lang.get_tokens_unprocessed(text),
+ lambda t: t in Token.Comment or t in Token.String
+ ):
+ if t is None:
+ for i2, t2, v2 in self._find_escape_tokens(v):
+ yield i + i2, t2, v2
+ else:
+ yield i, None, v
+
+ def _filter_to(self, it, pred):
+ """ Keep only the tokens that match `pred`, merge the others together """
buf = ''
idx = 0
- for i, t, v in self.lang.get_tokens_unprocessed(text):
- if t in Token.Comment or t in Token.String:
+ for i, t, v in it:
+ if pred(t):
if buf:
- yield from self.get_tokens_aux(idx, buf)
+ yield idx, None, buf
buf = ''
yield i, t, v
else:
idx = i
buf += v
if buf:
- yield from self.get_tokens_aux(idx, buf)
+ yield idx, None, buf
- def get_tokens_aux(self, index, text):
+ def _find_escape_tokens(self, text):
+ """ Find escape tokens within text, give token=None otherwise """
+ index = 0
while text:
a, sep1, text = text.partition(self.left)
if a:
- for i, t, v in self.lang.get_tokens_unprocessed(a):
- yield index + i, t, v
- index += len(a)
+ yield index, None, a
+ index += len(a)
if sep1:
b, sep2, text = text.partition(self.right)
if sep2:
'SieveLexer': ('pygments.lexers.sieve', 'Sieve', ('sieve',), ('*.siv', '*.sieve'), ()),
'SilverLexer': ('pygments.lexers.verification', 'Silver', ('silver',), ('*.sil', '*.vpr'), ()),
'SingularityLexer': ('pygments.lexers.configs', 'Singularity', ('singularity',), ('*.def', 'Singularity'), ()),
- 'SlashLexer': ('pygments.lexers.slash', 'Slash', ('slash',), ('*.sl',), ()),
+ 'SlashLexer': ('pygments.lexers.slash', 'Slash', ('slash',), ('*.sla',), ()),
'SlimLexer': ('pygments.lexers.webmisc', 'Slim', ('slim',), ('*.slim',), ('text/x-slim',)),
'SlurmBashLexer': ('pygments.lexers.shell', 'Slurm', ('slurm', 'sbatch'), ('*.sl',), ()),
'SmaliLexer': ('pygments.lexers.dalvik', 'Smali', ('smali',), ('*.smali',), ('text/smali',)),
]
}
+ def analyse_text(text):
+ """This is only used to disambiguate between ActionScript and
+ ActionScript3. We return 0 here; the ActionScript3 lexer will match
+ AS3 variable definitions and that will hopefully suffice."""
+ return 0
class ActionScript3Lexer(RegexLexer):
"""
'cleanupret', 'cmpxchg', 'cold', 'coldcc', 'comdat', 'common', 'constant',
'contract', 'convergent', 'critical', 'cxx_fast_tlscc', 'datalayout', 'declare',
'default', 'define', 'deplibs', 'dereferenceable', 'dereferenceable_or_null',
- 'distinct', 'dllexport', 'dllimport', 'double', 'dso_local', 'dso_preemptable',
+ 'distinct', 'dllexport', 'dllimport', 'dso_local', 'dso_preemptable',
'dsoLocal', 'eq', 'exact', 'exactmatch', 'extern_weak', 'external',
'externally_initialized', 'extractelement', 'extractvalue', 'fadd', 'false',
- 'fast', 'fastcc', 'fcmp', 'fdiv', 'fence', 'filter', 'flags', 'float', 'fmul',
- 'fp128', 'fpext', 'fptosi', 'fptoui', 'fptrunc', 'frem', 'from', 'fsub',
+ 'fast', 'fastcc', 'fcmp', 'fdiv', 'fence', 'filter', 'flags', 'fmul',
+ 'fpext', 'fptosi', 'fptoui', 'fptrunc', 'freeze', 'frem', 'from', 'fsub',
'funcFlags', 'function', 'gc', 'getelementptr', 'ghccc', 'global', 'guid', 'gv',
- 'half', 'hash', 'hhvm_ccc', 'hhvmcc', 'hidden', 'hot', 'hotness', 'icmp',
+ 'hash', 'hhvm_ccc', 'hhvmcc', 'hidden', 'hot', 'hotness', 'icmp',
'ifunc', 'inaccessiblemem_or_argmemonly', 'inaccessiblememonly', 'inalloca',
'inbounds', 'indir', 'indirectbr', 'info', 'initialexec', 'inline',
'inlineBits', 'inlinehint', 'inrange', 'inreg', 'insertelement', 'insertvalue',
'insts', 'intel_ocl_bicc', 'inteldialect', 'internal', 'inttoptr', 'invoke',
- 'jumptable', 'kind', 'label', 'landingpad', 'largest', 'linkage', 'linkonce',
+ 'jumptable', 'kind', 'landingpad', 'largest', 'linkage', 'linkonce',
'linkonce_odr', 'live', 'load', 'local_unnamed_addr', 'localdynamic',
'localexec', 'lshr', 'max', 'metadata', 'min', 'minsize', 'module', 'monotonic',
'msp430_intrcc', 'mul', 'musttail', 'naked', 'name', 'nand', 'ne', 'nest',
'nonlazybind', 'nonnull', 'norecurse', 'noRecurse', 'noredzone', 'noreturn',
'notail', 'notEligibleToImport', 'nounwind', 'nsw', 'nsz', 'null', 'nuw', 'oeq',
'offset', 'oge', 'ogt', 'ole', 'olt', 'one', 'opaque', 'optforfuzzing',
- 'optnone', 'optsize', 'or', 'ord', 'path', 'personality', 'phi', 'ppc_fp128',
+ 'optnone', 'optsize', 'or', 'ord', 'path', 'personality', 'phi',
'prefix', 'preserve_allcc', 'preserve_mostcc', 'private', 'prologue',
'protected', 'ptrtoint', 'ptx_device', 'ptx_kernel', 'readnone', 'readNone',
'readonly', 'readOnly', 'reassoc', 'refs', 'relbf', 'release', 'resByArg',
'unordered', 'unreachable', 'unsat', 'unwind', 'urem', 'uselistorder',
'uselistorder_bb', 'uwtable', 'va_arg', 'variable', 'vFuncId',
'virtualConstProp', 'void', 'volatile', 'weak', 'weak_odr', 'webkit_jscc',
- 'win64cc', 'within', 'wpdRes', 'wpdResolutions', 'writeonly', 'x',
- 'x86_64_sysvcc', 'x86_fastcallcc', 'x86_fp80', 'x86_intrcc', 'x86_mmx',
+ 'win64cc', 'within', 'wpdRes', 'wpdResolutions', 'writeonly',
+ 'x86_64_sysvcc', 'x86_fastcallcc', 'x86_intrcc', 'x86_mmx',
'x86_regcallcc', 'x86_stdcallcc', 'x86_thiscallcc', 'x86_vectorcallcc', 'xchg',
'xor', 'zeroext', 'zeroinitializer', 'zext', 'immarg', 'willreturn'),
suffix=r'\b'), Keyword),
# Types
- (words(('void', 'half', 'float', 'double', 'x86_fp80', 'fp128',
- 'ppc_fp128', 'label', 'metadata', 'token')), Keyword.Type),
+ (words(('void', 'half', 'bfloat', 'float', 'double', 'fp128',
+ 'x86_fp80', 'ppc_fp128', 'label', 'metadata', 'token')),
+ Keyword.Type),
# Integer types
- (r'i[1-9]\d*', Keyword)
+ (r'i[1-9]\d*', Keyword.Type)
]
}
],
}
+ def analyse_text(text):
+ """Try to identify OpenEdge ABL based on a few common constructs."""
+ result = 0
+
+ if 'END.' in text:
+ result += 0.05
+
+ if 'END PROCEDURE.' in text:
+ result += 0.05
+
+ if 'ELSE DO:' in text:
+ result += 0.05
+
+ return result
+
class GoodDataCLLexer(RegexLexer):
"""
(r'(.+?(?=^\s*%))|(.*)', using(BashLexer), '#pop'),
],
}
+
+ def analyse_text(text):
+ """This is a quite simple script file, but there are a few keywords
+ which seem unique to this language."""
+ result = 0
+ if re.search(r'\b(?:osversion|includecmd|mirrorurl)\b', text, re.IGNORECASE):
+ result += 0.5
+
+ if re.search(SingularityLexer._section[1:], text):
+ result += 0.49
+
+ return result
RegexLexer.__init__(self, **options)
+ def analyse_text(text):
+ """Nemerle is quite similar to Python, but @if is relatively uncommon
+ elsewhere."""
+ result = 0
+
+ if '@if' in text:
+ result += 0.1
+
+ return result
+
class BooLexer(RegexLexer):
"""
(r'"', String),
],
}
+
+ def analyse_text(text):
+ """F# doesn't have that many unique features -- |> and <| are weak
+ indicators."""
+ result = 0
+ if '|>' in text:
+ result += 0.05
+ if '<|' in text:
+ result += 0.05
+
+ return result
(r'[^"\']+', String),
],
}
+
+ def analyse_text(text):
+ """This is very difficult to guess relative to other business languages.
+        <- in conjunction with BEGIN/END seems relatively rare though."""
+ result = 0
+
+ if '<-' in text:
+ result += 0.01
+ if 'BEGIN' in text:
+ result += 0.01
+ if 'END' in text:
+ result += 0.01
+
+ return result
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error
+import re
+
__all__ = ['BrainfuckLexer', 'BefungeLexer', 'RedcodeLexer', 'CAmkESLexer',
'CapDLLexer', 'AheuiLexer']
]
}
+ def analyse_text(text):
+ """It's safe to assume that a program which mostly consists of + -
+ and < > is brainfuck."""
+ plus_minus_count = 0
+ greater_less_count = 0
+
+ range_to_check = max(256, len(text))
+
+ for c in text[:range_to_check]:
+ if c == '+' or c == '-':
+ plus_minus_count += 1
+ if c == '<' or c == '>':
+ greater_less_count += 1
+
+ if plus_minus_count > (0.25 * range_to_check):
+ return 1.0
+ if greater_less_count > (0.25 * range_to_check):
+ return 1.0
+
+ result = 0
+ if '[-]' in text:
+ result += 0.5
+
+ return result
+
class BefungeLexer(RegexLexer):
"""
]
}
+ def analyse_text(text):
+ """This language uses Tamil-script. We'll assume that if there's a
+ decent amount of Tamil-characters, it's this language. This assumption
+ is obviously horribly off if someone uses string literals in tamil
+ in another language."""
+ if len(re.findall(r'[\u0b80-\u0bff]', text)) > 10:
+ return 0.25
+
def __init__(self, **options):
super().__init__(**options)
self.encoding = options.get('encoding', 'utf-8')
(r'[^"]+', String, '#pop'),
],
}
+
+ def analyse_text(text):
+ """Forth uses : COMMAND ; quite a lot in a single line, so we're trying
+ to find that."""
+ if re.search('\n:[^\n]+;\n', text):
+ return 0.1
pygments.lexers.grammar_notation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- Lexers for grammer notations like BNF.
+ Lexers for grammar notations like BNF.
:copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
class BnfLexer(RegexLexer):
"""
- This lexer is for grammer notations which are similar to
+ This lexer is for grammar notations which are similar to
original BNF.
In order to maximize a number of targets of this lexer,
(r'\s+', Text),
]
}
+
+ def analyse_text(text):
+ """POVRAY is similar to JSON/C, but the combination of camera and
+ light_source is probably not very likely elsewhere. HLSL or GLSL
+ are similar (GLSL even has #version), but they miss #declare, and
+ light_source/camera are not keywords anywhere else -- it's fair
+ to assume though that any POVRAY scene must have a camera and
+ lightsource."""
+ result = 0
+ if '#version' in text:
+ result += 0.05
+ if '#declare' in text:
+ result += 0.05
+ if 'camera' in text:
+ result += 0.05
+ if 'light_source' in text:
+ result += 0.1
+
+ return result
]
}
+ def analyse_text(text):
+ """Verilog code will use one of reg/wire/assign for sure, and that
+ is not common elsewhere."""
+ result = 0
+ if 'reg' in text:
+ result += 0.1
+ if 'wire' in text:
+ result += 0.1
+ if 'assign' in text:
+ result += 0.1
+
+ return result
+
class SystemVerilogLexer(RegexLexer):
"""
_RESERVED = (
'and', 'begin', 'break', 'case', 'common', 'compile_opt',
- 'continue', 'do', 'else', 'end', 'endcase', 'elseelse',
+ 'continue', 'do', 'else', 'end', 'endcase', 'endelse',
'endfor', 'endforeach', 'endif', 'endrep', 'endswitch',
'endwhile', 'eq', 'for', 'foreach', 'forward_function',
'function', 'ge', 'goto', 'gt', 'if', 'inherits', 'le',
(r'.', Text),
]
}
+
+ def analyse_text(text):
+ """endelse seems to be unique to IDL, endswitch is rare at least."""
+ result = 0
+
+ if 'endelse' in text:
+ result += 0.2
+ if 'endswitch' in text:
+ result += 0.01
+
+ return result
\ No newline at end of file
while objectloop_queue:
yield objectloop_queue.pop(0)
+ def analyse_text(text):
+        """We try to find a keyword which seems relatively common; unfortunately
+        there is a decent overlap with Smalltalk keywords otherwise here."""
+ result = 0
+        if re.search(r'\borigsource\b', text, re.IGNORECASE):
+ result += 0.05
+
+ return result
+
class Inform7Lexer(RegexLexer):
"""
else:
token = Comment
yield index, token, value
+
+ def analyse_text(text):
+ """This is a rather generic descriptive language without strong
+ identifiers. It looks like a 'GameMainDef' has to be present,
+ and/or a 'versionInfo' with an 'IFID' field."""
+ result = 0
+ if '__TADS' in text or 'GameMainDef' in text:
+ result += 0.2
+
+ # This is a fairly unique keyword which is likely used in source as well
+ if 'versionInfo' in text and 'IFID' in text:
+ result += 0.1
+
+ return result
],
}
+ def analyse_text(text):
+ """Octave is quite hard to spot, and it looks like Matlab as well."""
+ return 0
+
class ScilabLexer(RegexLexer):
"""
# return result
yield index, token, value
+
+ def analyse_text(text):
+ """It's Pascal-like, but does not use FUNCTION -- uses PROCEDURE
+ instead."""
+
+ # Check if this looks like Pascal, if not, bail out early
+ if not ('(*' in text and '*)' in text and ':=' in text):
+ return
+
+ result = 0
+ # Procedure is in Modula2
+ if re.search(r'\bPROCEDURE\b', text):
+ result += 0.6
+
+ # FUNCTION is only valid in Pascal, but not in Modula2
+ if re.search(r'\bFUNCTION\b', text):
+ result = 0.0
+
+ return result
(r'(TRUE|FALSE|NIL|INF)\b', Keyword.Constant),
]
}
+
+ def analyse_text(text):
+        """The only other lexer using .cp is the C++ one, so we check for
+ a few common Pascal keywords here. Those are unfortunately quite
+ common across various business languages as well."""
+ result = 0
+ if 'BEGIN' in text:
+ result += 0.01
+ if 'END' in text:
+ result += 0.01
+ if 'PROCEDURE' in text:
+ result += 0.01
+ if 'END' in text:
+ result += 0.01
+
+ return result
(r'.*?\n', Comment),
]
}
+
+ def analyse_text(text):
+ """This is basically C. There is a keyword which doesn't exist in C
+ though and is nearly unique to this language."""
+ if 'tagof' in text:
+ return 0.01
def analyse_text(text):
if shebang_matches(text, r'perl'):
return True
+
+ result = 0
+
if re.search(r'(?:my|our)\s+[$@%(]', text):
- return 0.9
+ result += 0.9
+
+ if ':=' in text:
+ # := is not valid Perl, but it appears in unicon, so we should
+ # become less confident if we think we found Perl with :=
+ result /= 2
+
+ return result
class Perl6Lexer(ExtendedRegexLexer):
continue
break
+ if ':=' in text:
+ # Same logic as above for PerlLexer
+ rating /= 2
+
return rating
def __init__(self, **options):
bygroups(Text, Comment, Text)), # for doctests...
(r'^([^:]+)(: )(.+)(\n)',
bygroups(Generic.Error, Text, Name, Text), '#pop'),
- (r'^([a-zA-Z_]\w*)(:?\n)',
+ (r'^([a-zA-Z_][\w.]*)(:?\n)',
bygroups(Generic.Error, Text), '#pop')
],
}
class SparqlLexer(RegexLexer):
"""
- Lexer for `SPARQL <http://www.w3.org/TR/rdf-sparql-query/>`_ query language.
+ Lexer for `SPARQL <https://www.w3.org/TR/sparql11-query/>`_ query language.
.. versionadded:: 2.0
"""
# keywords ::
(r'(?i)(select|construct|describe|ask|where|filter|group\s+by|minus|'
r'distinct|reduced|from\s+named|from|order\s+by|desc|asc|limit|'
- r'offset|bindings|load|clear|drop|create|add|move|copy|'
- r'insert\s+data|delete\s+data|delete\s+where|delete|insert|'
+ r'offset|values|bindings|load|into|clear|drop|create|add|move|copy|'
+ r'insert\s+data|delete\s+data|delete\s+where|with|delete|insert|'
r'using\s+named|using|graph|default|named|all|optional|service|'
- r'silent|bind|union|not\s+in|in|as|having|to|prefix|base)\b', Keyword),
+ r'silent|bind|undef|union|not\s+in|in|as|having|to|prefix|base)\b', Keyword),
(r'(a)\b', Keyword),
# IRIs ::
('(' + IRIREF + ')', Name.Label),
(r'(?i)(str|lang|langmatches|datatype|bound|iri|uri|bnode|rand|abs|'
r'ceil|floor|round|concat|strlen|ucase|lcase|encode_for_uri|'
r'contains|strstarts|strends|strbefore|strafter|year|month|day|'
- r'hours|minutes|seconds|timezone|tz|now|md5|sha1|sha256|sha384|'
+ r'hours|minutes|seconds|timezone|tz|now|uuid|struuid|md5|sha1|sha256|sha384|'
r'sha512|coalesce|if|strlang|strdt|sameterm|isiri|isuri|isblank|'
r'isliteral|isnumeric|regex|substr|replace|exists|not\s+exists|'
r'count|sum|min|max|avg|sample|group_concat|separator)\b',
(r'.', Text),
],
}
+
+ def analyse_text(text):
+ """This is very similar to markdown, save for the escape characters
+ needed for * and _."""
+ result = 0
+
+ if '\\*' in text:
+ result += 0.01
+
+ if '\\_' in text:
+ result += 0.01
+
+ return result
],
}
+ def analyse_text(text):
+ """public method and private method don't seem to be quite common
+ elsewhere."""
+ result = 0
+ if re.search(r'\b(?:public|private)\s+method\b', text):
+ result += 0.01
+ return result
+
+
class EasytrieveLexer(RegexLexer):
"""
name = 'Slash'
aliases = ['slash']
- filenames = ['*.sl']
+ filenames = ['*.sla']
def __init__(self, **options):
from pygments.lexers.web import HtmlLexer
(r"'(\\\\|\\'|[^'])*'", String.Single),
]
}
+
+ def analyse_text(text):
+ """We're searching for a common function and a unique keyword here."""
+ if 'SinOsc' in text or 'thisFunctionDef' in text:
+ return 0.1
],
}
+ def analyse_text(text):
+ """Evoque templates use $evoque, which is unique."""
+ if '$evoque' in text:
+ return 1
class EvoqueHtmlLexer(DelegatingLexer):
"""
def __init__(self, **options):
super().__init__(HtmlLexer, EvoqueLexer, **options)
+ def analyse_text(text):
+ return EvoqueLexer.analyse_text(text)
+
class EvoqueXmlLexer(DelegatingLexer):
"""
def __init__(self, **options):
super().__init__(XmlLexer, EvoqueLexer, **options)
+ def analyse_text(text):
+ return EvoqueLexer.analyse_text(text)
+
class ColdfusionLexer(RegexLexer):
"""
}
def analyse_text(text):
- if text.startswith('(*'):
- return True
+ if 'qed' in text and 'tauto' in text:
+ return 1
class IsabelleLexer(RegexLexer):
from pygments.lexer import Lexer
from pygments.token import Text, Comment, Operator, Keyword, Name, Number, \
- Punctuation, Error
+ Punctuation, Error
__all__ = ['TNTLexer']
LINENOS = re.compile(r'(?:[0-9]+)(?:(?:, ?|,? and )(?:[0-9]+))*')
COMMENT = re.compile(r'\[[^\n\]]+\]')
+ def __init__(self, *args, **kwargs):
+ Lexer.__init__(self, *args, **kwargs)
+ self.cur = []
+
def whitespace(self, start, text, required=False):
"""Tokenize whitespace."""
end = start
def formula(self, start, text):
"""Tokenize a formula."""
- if text[start] in '[]': # fantasy push or pop
- self.cur.append((start, Keyword, text[start]))
- return start+1
if text[start] in self.NEGATORS: # ~<...>
end = start+1
while text[end] in self.NEGATORS:
return match.end()
def lineno(self, start, text):
- """Tokenize a line marker."""
+ """Tokenize a line referral."""
end = start
while text[end] not in self.NUMBERS:
end += 1
self.cur = []
start = end = self.whitespace(0, text)
while start <= end < len(text):
- # try line number
- while text[end] in self.NUMBERS:
- end += 1
- if end != start: # actual number present
- self.cur.append((start, Number.Integer, text[start:end]))
- # whitespace is required after a line number
+ try:
+ # try line number
+ while text[end] in self.NUMBERS:
+ end += 1
+ if end != start: # actual number present
+ self.cur.append((start, Number.Integer, text[start:end]))
+ # whitespace is required after a line number
+ orig = len(self.cur)
+ try:
+ start = end = self.whitespace(end, text, True)
+ except AssertionError:
+ del self.cur[orig:]
+ start = end = self.error_till_line_end(end, text)
+ continue
+ # at this point it could be a comment
+ match = self.COMMENT.match(text, start)
+ if match is not None:
+ self.cur.append((start, Comment, text[start:match.end()]))
+ start = end = match.end()
+ # anything after the closing bracket is invalid
+ start = end = self.error_till_line_end(start, text)
+ # do not attempt to process the rest
+ continue
+ del match
+ if text[start] in '[]': # fantasy push or pop
+ self.cur.append((start, Keyword, text[start]))
+ start += 1
+ end += 1
+ else:
+ # one formula, possibly containing subformulae
+ orig = len(self.cur)
+ try:
+ start = end = self.formula(start, text)
+ except AssertionError: # not well-formed
+ del self.cur[orig:]
+ while text[end] not in self.WHITESPACE:
+ end += 1
+ self.cur.append((start, Error, text[start:end]))
+ start = end
+ # skip whitespace after formula
orig = len(self.cur)
try:
start = end = self.whitespace(end, text, True)
except AssertionError:
del self.cur[orig:]
- start = end = self.error_till_line_end(end, text)
+ start = end = self.error_till_line_end(start, text)
continue
- # at this point it could be a comment
- match = self.COMMENT.match(text, start)
- if match is not None:
- self.cur.append((start, Comment, text[start:match.end()]))
- start = end = match.end()
- # anything after the closing bracket is invalid
- start = end = self.error_till_line_end(start, text)
- # do not attempt to process the rest
- continue
- del match
- # one formula, possibly containing subformulae
- orig = len(self.cur)
- try:
- start = end = self.formula(start, text)
- except AssertionError: # not well-formed
- del self.cur[orig:]
- while text[end] not in self.WHITESPACE:
- end += 1
- self.cur.append((start, Error, text[start:end]))
- start = end
- # skip whitespace after formula
- orig = len(self.cur)
- try:
- start = end = self.whitespace(end, text, True)
- except AssertionError:
- del self.cur[orig:]
- start = end = self.error_till_line_end(start, text)
- continue
- # rule proving this formula a theorem
- orig = len(self.cur)
- try:
- start = end = self.rule(start, text)
- except AssertionError:
- del self.cur[orig:]
- start = end = self.error_till_line_end(start, text)
- continue
- # skip whitespace after rule
- start = end = self.whitespace(end, text)
- # line marker
- if text[start] == '(':
+ # rule proving this formula a theorem
orig = len(self.cur)
try:
- start = end = self.lineno(start, text)
+ start = end = self.rule(start, text)
except AssertionError:
del self.cur[orig:]
start = end = self.error_till_line_end(start, text)
continue
- start = end = self.whitespace(start, text)
+ # skip whitespace after rule
+ start = end = self.whitespace(end, text)
+ # line marker
+ if text[start] == '(':
+ orig = len(self.cur)
+ try:
+ start = end = self.lineno(start, text)
+ except AssertionError:
+ del self.cur[orig:]
+ start = end = self.error_till_line_end(start, text)
+ continue
+ start = end = self.whitespace(start, text)
+ except IndexError:
+ try:
+ del self.cur[orig:]
+ except NameError:
+ pass # if orig was never defined, fine
+ self.error_till_line_end(start, text)
return self.cur
(r'[\w-]+', Text),
],
}
+
+ def analyse_text(text):
+ """endsuspend and endrepeat are unique to this language, and
+        \\self, /self don't seem to get used anywhere else either."""
+ result = 0
+
+ if 'endsuspend' in text:
+ result += 0.1
+
+ if 'endrepeat' in text:
+ result += 0.1
+
+ if ':=' in text:
+ result += 0.01
+
+ if 'procedure' in text and 'end' in text:
+ result += 0.01
+
+ # This seems quite unique to unicon -- doesn't appear in any other
+ # example source we have (A quick search reveals that \SELF appears in
+ # Perl/Raku code)
+ if r'\self' in text and r'/self' in text:
+ result += 0.5
+
+ return result
(r'[*/]', Comment.Multiline),
]
}
+
+ def analyse_text(text):
+ """This is fairly similar to C and others, but freezeif and
+ waituntil are unique keywords."""
+ result = 0
+
+ if 'freezeif' in text:
+ result += 0.05
+
+ if 'waituntil' in text:
+ result += 0.05
+
+ return result
def get_all_styles():
- """Return an generator for all styles by name,
+ """Return a generator for all styles by name,
both builtin and plugin."""
yield from STYLE_MAP
for name, _ in find_plugin_styles():
+++ /dev/null
-[pytest]
-filterwarnings =
- error::FutureWarning
- error::DeprecationWarning
+++ /dev/null
-pytest-cov
-pytest-randomly
-pytest
-pyflakes
-pylint
-tox
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+ Checker for line endings
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+    Make sure Python (.py) and Bash completion (.bashcomp) files do not
+ contain CR/LF newlines.
+
+ :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import sys
+import os
+
+if __name__ == '__main__':
+ for directory in sys.argv[1:]:
+ if not os.path.exists(directory):
+ continue
+
+ for root, dirs, files in os.walk(directory):
+ for filename in files:
+ if not filename.endswith('.py') and not filename.endswith('.bashcomp'):
+ continue
+
+ full_path = os.path.join(root, filename)
+ with open(full_path, 'rb') as f:
+ if b'\r\n' in f.read():
+ print('CR/LF found in', full_path)
+ sys.exit(1)
+
+ sys.exit(0)
+++ /dev/null
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-"""
- Lexing error finder
- ~~~~~~~~~~~~~~~~~~~
-
- For the source files given on the command line, display
- the text where Error tokens are being generated, along
- with some context.
-
- :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-import os
-import sys
-
-# always prefer Pygments from source if exists
-srcpath = os.path.join(os.path.dirname(__file__), '..')
-if os.path.isdir(os.path.join(srcpath, 'pygments')):
- sys.path.insert(0, srcpath)
-
-
-from pygments.lexer import RegexLexer, ExtendedRegexLexer, LexerContext, \
- ProfilingRegexLexer, ProfilingRegexLexerMeta
-from pygments.lexers import get_lexer_by_name, find_lexer_class, \
- find_lexer_class_for_filename
-from pygments.token import Error, Text, _TokenType
-from pygments.cmdline import _parse_options
-
-
-class DebuggingRegexLexer(ExtendedRegexLexer):
- """Make the state stack, position and current match instance attributes."""
-
- def get_tokens_unprocessed(self, text, stack=('root',)):
- """
- Split ``text`` into (tokentype, text) pairs.
-
- ``stack`` is the inital stack (default: ``['root']``)
- """
- tokendefs = self._tokens
- self.ctx = ctx = LexerContext(text, 0)
- ctx.stack = list(stack)
- statetokens = tokendefs[ctx.stack[-1]]
- while 1:
- for rexmatch, action, new_state in statetokens:
- self.m = m = rexmatch(text, ctx.pos, ctx.end)
- if m:
- if action is not None:
- if type(action) is _TokenType:
- yield ctx.pos, action, m.group()
- ctx.pos = m.end()
- else:
- if not isinstance(self, ExtendedRegexLexer):
- yield from action(self, m)
- ctx.pos = m.end()
- else:
- yield from action(self, m, ctx)
- if not new_state:
- # altered the state stack?
- statetokens = tokendefs[ctx.stack[-1]]
- if new_state is not None:
- # state transition
- if isinstance(new_state, tuple):
- for state in new_state:
- if state == '#pop':
- ctx.stack.pop()
- elif state == '#push':
- ctx.stack.append(ctx.stack[-1])
- else:
- ctx.stack.append(state)
- elif isinstance(new_state, int):
- # pop
- del ctx.stack[new_state:]
- elif new_state == '#push':
- ctx.stack.append(ctx.stack[-1])
- else:
- assert False, 'wrong state def: %r' % new_state
- statetokens = tokendefs[ctx.stack[-1]]
- break
- else:
- try:
- if ctx.pos >= ctx.end:
- break
- if text[ctx.pos] == '\n':
- # at EOL, reset state to 'root'
- ctx.stack = ['root']
- statetokens = tokendefs['root']
- yield ctx.pos, Text, '\n'
- ctx.pos += 1
- continue
- yield ctx.pos, Error, text[ctx.pos]
- ctx.pos += 1
- except IndexError:
- break
-
-
-def main(fn, lexer=None, options={}):
- if lexer is not None:
- lxcls = get_lexer_by_name(lexer).__class__
- else:
- lxcls = find_lexer_class_for_filename(os.path.basename(fn))
- if lxcls is None:
- name, rest = fn.split('_', 1)
- lxcls = find_lexer_class(name)
- if lxcls is None:
- raise AssertionError('no lexer found for file %r' % fn)
- print('Using lexer: %s (%s.%s)' % (lxcls.name, lxcls.__module__,
- lxcls.__name__))
- debug_lexer = False
- # if profile:
- # # does not work for e.g. ExtendedRegexLexers
- # if lxcls.__bases__ == (RegexLexer,):
- # # yes we can! (change the metaclass)
- # lxcls.__class__ = ProfilingRegexLexerMeta
- # lxcls.__bases__ = (ProfilingRegexLexer,)
- # lxcls._prof_sort_index = profsort
- # else:
- # if lxcls.__bases__ == (RegexLexer,):
- # lxcls.__bases__ = (DebuggingRegexLexer,)
- # debug_lexer = True
- # elif lxcls.__bases__ == (DebuggingRegexLexer,):
- # # already debugged before
- # debug_lexer = True
- # else:
- # # HACK: ExtendedRegexLexer subclasses will only partially work here.
- # lxcls.__bases__ = (DebuggingRegexLexer,)
- # debug_lexer = True
-
- lx = lxcls(**options)
- lno = 1
- if fn == '-':
- text = sys.stdin.read()
- else:
- with open(fn, 'rb') as fp:
- text = fp.read().decode('utf-8')
- text = text.strip('\n') + '\n'
- tokens = []
- states = []
-
- def show_token(tok, state):
- reprs = list(map(repr, tok))
- print(' ' + reprs[1] + ' ' + ' ' * (29-len(reprs[1])) + reprs[0], end=' ')
- if debug_lexer:
- print(' ' + ' ' * (29-len(reprs[0])) + ' : '.join(state) if state else '', end=' ')
- print()
-
- for type, val in lx.get_tokens(text):
- lno += val.count('\n')
- if type == Error and not ignerror:
- print('Error parsing', fn, 'on line', lno)
- if not showall:
- print('Previous tokens' + (debug_lexer and ' and states' or '') + ':')
- for i in range(max(len(tokens) - num, 0), len(tokens)):
- if debug_lexer:
- show_token(tokens[i], states[i])
- else:
- show_token(tokens[i], None)
- print('Error token:')
- l = len(repr(val))
- print(' ' + repr(val), end=' ')
- if debug_lexer and hasattr(lx, 'ctx'):
- print(' ' * (60-l) + ' : '.join(lx.ctx.stack), end=' ')
- print()
- print()
- return 1
- tokens.append((type, val))
- if debug_lexer:
- if hasattr(lx, 'ctx'):
- states.append(lx.ctx.stack[:])
- else:
- states.append(None)
- if showall:
- show_token((type, val), states[-1] if debug_lexer else None)
- return 0
-
-
-def print_help():
- print('''\
-Pygments development helper to quickly debug lexers.
-
- scripts/debug_lexer.py [options] file ...
-
-Give one or more filenames to lex them and display possible error tokens
-and/or profiling info. Files are assumed to be encoded in UTF-8.
-
-Selecting lexer and options:
-
- -l NAME use lexer named NAME (default is to guess from
- the given filenames)
- -O OPTIONSTR use lexer options parsed from OPTIONSTR
-
-Debugging lexing errors:
-
- -n N show the last N tokens on error
- -a always show all lexed tokens (default is only
- to show them when an error occurs)
- -e do not stop on error tokens
-
-Profiling:
-
- -p use the ProfilingRegexLexer to profile regexes
- instead of the debugging lexer
- -s N sort profiling output by column N (default is
- column 4, the time per call)
-''')
-
-num = 10
-showall = False
-ignerror = False
-lexer = None
-options = {}
-profile = False
-profsort = 4
-
-if __name__ == '__main__':
- import getopt
- opts, args = getopt.getopt(sys.argv[1:], 'n:l:aepO:s:h')
- for opt, val in opts:
- if opt == '-n':
- num = int(val)
- elif opt == '-a':
- showall = True
- elif opt == '-e':
- ignerror = True
- elif opt == '-l':
- lexer = val
- elif opt == '-p':
- profile = True
- elif opt == '-s':
- profsort = int(val)
- elif opt == '-O':
- options = _parse_options([val])
- elif opt == '-h':
- print_help()
- sys.exit(0)
- ret = 0
- if not args:
- print_help()
- for f in args:
- ret += main(f, lexer, options)
- sys.exit(bool(ret))
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
+ 'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Operating System :: OS Independent',
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: unsupported operand type(s) for /: 'int' and 'str'
+>>> blah
+Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+builtins.NameError
<div class="highlight" style="background: #f8f8f8">
- <pre style="line-height: 125%; margin: 0;"><span></span><span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;">1</span><span style="color: #408080; font-style: italic"># a</span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;">2</span><span style="color: #408080; font-style: italic"># b</span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;">3</span><span style="color: #408080; font-style: italic"># c</span>
+ <pre style="line-height: 125%; margin: 0;"><span></span><span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;">1</span><span style="color: #408080; font-style: italic"># a</span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;">2</span><span style="color: #408080; font-style: italic"># b</span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;">3</span><span style="color: #408080; font-style: italic"># c</span>
</pre>
</div>
<div class="highlight" style="background: #f8f8f8">
- <pre style="line-height: 125%; margin: 0;"><span></span><span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;">1</span><span style="color: #408080; font-style: italic"># a</span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;">2</span><span style="color: #408080; font-style: italic"># b</span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;">3</span><span style="color: #408080; font-style: italic"># c</span>
+ <pre style="line-height: 125%; margin: 0;"><span></span><span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;">1</span><span style="color: #408080; font-style: italic"># a</span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;">2</span><span style="color: #408080; font-style: italic"># b</span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;">3</span><span style="color: #408080; font-style: italic"># c</span>
</pre>
</div>
<div class="highlight" style="background: #f8f8f8">
- <pre style="line-height: 125%; margin: 0;"><span></span><span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;">1</span><span style="color: #408080; font-style: italic"># a</span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;">2</span><span style="color: #408080; font-style: italic"># b</span>
-<span style="color: #000000; background-color: #ffffc0; padding: 0 5px 0 5px;">3</span><span style="color: #408080; font-style: italic"># c</span>
+ <pre style="line-height: 125%; margin: 0;"><span></span><span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;">1</span><span style="color: #408080; font-style: italic"># a</span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;">2</span><span style="color: #408080; font-style: italic"># b</span>
+<span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;">3</span><span style="color: #408080; font-style: italic"># c</span>
</pre>
</div>
<div class="highlight" style="background: #f8f8f8">
- <pre style="line-height: 125%; margin: 0;"><span></span><span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;">1</span><span style="color: #408080; font-style: italic"># a</span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;">2</span><span style="color: #408080; font-style: italic"># b</span>
-<span style="color: #000000; background-color: #ffffc0; padding: 0 5px 0 5px;">3</span><span style="color: #408080; font-style: italic"># c</span>
+ <pre style="line-height: 125%; margin: 0;"><span></span><span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;">1</span><span style="color: #408080; font-style: italic"># a</span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;">2</span><span style="color: #408080; font-style: italic"># b</span>
+<span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;">3</span><span style="color: #408080; font-style: italic"># c</span>
</pre>
</div>
<div class="highlight" style="background: #f8f8f8">
- <pre style="line-height: 125%; margin: 0;"><span></span><span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"> 8</span><span style="color: #408080; font-style: italic"># a</span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"> 9</span><span style="color: #408080; font-style: italic"># b</span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;">10</span><span style="color: #408080; font-style: italic"># c</span>
+ <pre style="line-height: 125%; margin: 0;"><span></span><span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"> 8</span><span style="color: #408080; font-style: italic"># a</span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"> 9</span><span style="color: #408080; font-style: italic"># b</span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;">10</span><span style="color: #408080; font-style: italic"># c</span>
</pre>
</div>
<div class="highlight" style="background: #f8f8f8">
- <pre style="line-height: 125%; margin: 0;"><span></span><span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"> 8</span><span style="color: #408080; font-style: italic"># a</span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"> 9</span><span style="color: #408080; font-style: italic"># b</span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;">10</span><span style="color: #408080; font-style: italic"># c</span>
+ <pre style="line-height: 125%; margin: 0;"><span></span><span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"> 8</span><span style="color: #408080; font-style: italic"># a</span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"> 9</span><span style="color: #408080; font-style: italic"># b</span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;">10</span><span style="color: #408080; font-style: italic"># c</span>
</pre>
</div>
<div class="highlight" style="background: #f8f8f8">
- <pre style="line-height: 125%; margin: 0;"><span></span><span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"> 8</span><span style="color: #408080; font-style: italic"># a</span>
-<span style="color: #000000; background-color: #ffffc0; padding: 0 5px 0 5px;"> 9</span><span style="color: #408080; font-style: italic"># b</span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;">10</span><span style="color: #408080; font-style: italic"># c</span>
+ <pre style="line-height: 125%; margin: 0;"><span></span><span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"> 8</span><span style="color: #408080; font-style: italic"># a</span>
+<span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;"> 9</span><span style="color: #408080; font-style: italic"># b</span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;">10</span><span style="color: #408080; font-style: italic"># c</span>
</pre>
</div>
<div class="highlight" style="background: #f8f8f8">
- <pre style="line-height: 125%; margin: 0;"><span></span><span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"> 8</span><span style="color: #408080; font-style: italic"># a</span>
-<span style="color: #000000; background-color: #ffffc0; padding: 0 5px 0 5px;"> 9</span><span style="color: #408080; font-style: italic"># b</span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;">10</span><span style="color: #408080; font-style: italic"># c</span>
+ <pre style="line-height: 125%; margin: 0;"><span></span><span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"> 8</span><span style="color: #408080; font-style: italic"># a</span>
+<span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;"> 9</span><span style="color: #408080; font-style: italic"># b</span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;">10</span><span style="color: #408080; font-style: italic"># c</span>
</pre>
</div>
<div class="highlight" style="background: #f8f8f8">
- <pre style="line-height: 125%; margin: 0;"><span></span><span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"> </span><span style="color: #408080; font-style: italic"># a</span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;">2</span><span style="color: #408080; font-style: italic"># b</span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"> </span><span style="color: #408080; font-style: italic"># c</span>
+ <pre style="line-height: 125%; margin: 0;"><span></span><span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"> </span><span style="color: #408080; font-style: italic"># a</span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;">2</span><span style="color: #408080; font-style: italic"># b</span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"> </span><span style="color: #408080; font-style: italic"># c</span>
</pre>
</div>
<div class="highlight" style="background: #f8f8f8">
- <pre style="line-height: 125%; margin: 0;"><span></span><span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"> </span><span style="color: #408080; font-style: italic"># a</span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;">2</span><span style="color: #408080; font-style: italic"># b</span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"> </span><span style="color: #408080; font-style: italic"># c</span>
+ <pre style="line-height: 125%; margin: 0;"><span></span><span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"> </span><span style="color: #408080; font-style: italic"># a</span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;">2</span><span style="color: #408080; font-style: italic"># b</span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"> </span><span style="color: #408080; font-style: italic"># c</span>
</pre>
</div>
<div class="highlight" style="background: #f8f8f8">
- <pre style="line-height: 125%; margin: 0;"><span></span><span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"> </span><span style="color: #408080; font-style: italic"># a</span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;">2</span><span style="color: #408080; font-style: italic"># b</span>
-<span style="color: #000000; background-color: #ffffc0; padding: 0 5px 0 5px;"> </span><span style="color: #408080; font-style: italic"># c</span>
+ <pre style="line-height: 125%; margin: 0;"><span></span><span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"> </span><span style="color: #408080; font-style: italic"># a</span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;">2</span><span style="color: #408080; font-style: italic"># b</span>
+<span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;"> </span><span style="color: #408080; font-style: italic"># c</span>
</pre>
</div>
<div class="highlight" style="background: #f8f8f8">
- <pre style="line-height: 125%; margin: 0;"><span></span><span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"> </span><span style="color: #408080; font-style: italic"># a</span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;">2</span><span style="color: #408080; font-style: italic"># b</span>
-<span style="color: #000000; background-color: #ffffc0; padding: 0 5px 0 5px;"> </span><span style="color: #408080; font-style: italic"># c</span>
+ <pre style="line-height: 125%; margin: 0;"><span></span><span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"> </span><span style="color: #408080; font-style: italic"># a</span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;">2</span><span style="color: #408080; font-style: italic"># b</span>
+<span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;"> </span><span style="color: #408080; font-style: italic"># c</span>
</pre>
</div>
<div class="highlight" style="background: #f8f8f8">
- <pre style="line-height: 125%; margin: 0;"><span></span><span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"> 8</span><span style="color: #408080; font-style: italic"># a</span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"> </span><span style="color: #408080; font-style: italic"># b</span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;">10</span><span style="color: #408080; font-style: italic"># c</span>
+ <pre style="line-height: 125%; margin: 0;"><span></span><span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"> 8</span><span style="color: #408080; font-style: italic"># a</span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"> </span><span style="color: #408080; font-style: italic"># b</span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;">10</span><span style="color: #408080; font-style: italic"># c</span>
</pre>
</div>
<div class="highlight" style="background: #f8f8f8">
- <pre style="line-height: 125%; margin: 0;"><span></span><span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"> 8</span><span style="color: #408080; font-style: italic"># a</span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"> </span><span style="color: #408080; font-style: italic"># b</span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;">10</span><span style="color: #408080; font-style: italic"># c</span>
+ <pre style="line-height: 125%; margin: 0;"><span></span><span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"> 8</span><span style="color: #408080; font-style: italic"># a</span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"> </span><span style="color: #408080; font-style: italic"># b</span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;">10</span><span style="color: #408080; font-style: italic"># c</span>
</pre>
</div>
<div class="highlight" style="background: #f8f8f8">
- <pre style="line-height: 125%; margin: 0;"><span></span><span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"> 8</span><span style="color: #408080; font-style: italic"># a</span>
-<span style="color: #000000; background-color: #ffffc0; padding: 0 5px 0 5px;"> </span><span style="color: #408080; font-style: italic"># b</span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;">10</span><span style="color: #408080; font-style: italic"># c</span>
+ <pre style="line-height: 125%; margin: 0;"><span></span><span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"> 8</span><span style="color: #408080; font-style: italic"># a</span>
+<span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;"> </span><span style="color: #408080; font-style: italic"># b</span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;">10</span><span style="color: #408080; font-style: italic"># c</span>
</pre>
</div>
<div class="highlight" style="background: #f8f8f8">
- <pre style="line-height: 125%; margin: 0;"><span></span><span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"> 8</span><span style="color: #408080; font-style: italic"># a</span>
-<span style="color: #000000; background-color: #ffffc0; padding: 0 5px 0 5px;"> </span><span style="color: #408080; font-style: italic"># b</span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;">10</span><span style="color: #408080; font-style: italic"># c</span>
+ <pre style="line-height: 125%; margin: 0;"><span></span><span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"> 8</span><span style="color: #408080; font-style: italic"># a</span>
+<span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;"> </span><span style="color: #408080; font-style: italic"># b</span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;">10</span><span style="color: #408080; font-style: italic"># c</span>
</pre>
</div>
<tr>
<td class="linenos">
<div class="linenodiv">
- <pre><span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"><a href="#-1">1</a></span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"><a href="#-2">2</a></span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"><a href="#-3">3</a></span></pre>
+ <pre><span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"><a href="#-1">1</a></span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"><a href="#-2">2</a></span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"><a href="#-3">3</a></span></pre>
</div>
</td>
<td class="code">
<tr>
<td class="linenos">
<div class="linenodiv">
- <pre><span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;">1</span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;">2</span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;">3</span></pre>
+ <pre><span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;">1</span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;">2</span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;">3</span></pre>
</div>
</td>
<td class="code">
<tr>
<td class="linenos">
<div class="linenodiv">
- <pre><span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"><a href="#-1">1</a></span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"><a href="#-2">2</a></span>
-<span style="color: #000000; background-color: #ffffc0; padding: 0 5px 0 5px;"><a href="#-3">3</a></span></pre>
+ <pre><span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"><a href="#-1">1</a></span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"><a href="#-2">2</a></span>
+<span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;"><a href="#-3">3</a></span></pre>
</div>
</td>
<td class="code">
<tr>
<td class="linenos">
<div class="linenodiv">
- <pre><span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;">1</span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;">2</span>
-<span style="color: #000000; background-color: #ffffc0; padding: 0 5px 0 5px;">3</span></pre>
+ <pre><span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;">1</span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;">2</span>
+<span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;">3</span></pre>
</div>
</td>
<td class="code">
<tr>
<td class="linenos">
<div class="linenodiv">
- <pre><span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"><a href="#-8"> 8</a></span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"><a href="#-9"> 9</a></span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"><a href="#-10">10</a></span></pre>
+ <pre><span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"><a href="#-8"> 8</a></span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"><a href="#-9"> 9</a></span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"><a href="#-10">10</a></span></pre>
</div>
</td>
<td class="code">
<tr>
<td class="linenos">
<div class="linenodiv">
- <pre><span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"> 8</span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"> 9</span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;">10</span></pre>
+ <pre><span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"> 8</span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"> 9</span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;">10</span></pre>
</div>
</td>
<td class="code">
<tr>
<td class="linenos">
<div class="linenodiv">
- <pre><span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"><a href="#-8"> 8</a></span>
-<span style="color: #000000; background-color: #ffffc0; padding: 0 5px 0 5px;"><a href="#-9"> 9</a></span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"><a href="#-10">10</a></span></pre>
+ <pre><span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"><a href="#-8"> 8</a></span>
+<span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;"><a href="#-9"> 9</a></span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"><a href="#-10">10</a></span></pre>
</div>
</td>
<td class="code">
<tr>
<td class="linenos">
<div class="linenodiv">
- <pre><span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"> 8</span>
-<span style="color: #000000; background-color: #ffffc0; padding: 0 5px 0 5px;"> 9</span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;">10</span></pre>
+ <pre><span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"> 8</span>
+<span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;"> 9</span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;">10</span></pre>
</div>
</td>
<td class="code">
<tr>
<td class="linenos">
<div class="linenodiv">
- <pre><span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"> </span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"><a href="#-2">2</a></span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"> </span></pre>
+ <pre><span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"> </span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"><a href="#-2">2</a></span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"> </span></pre>
</div>
</td>
<td class="code">
<tr>
<td class="linenos">
<div class="linenodiv">
- <pre><span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"> </span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;">2</span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"> </span></pre>
+ <pre><span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"> </span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;">2</span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"> </span></pre>
</div>
</td>
<td class="code">
<tr>
<td class="linenos">
<div class="linenodiv">
- <pre><span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"> </span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"><a href="#-2">2</a></span>
-<span style="color: #000000; background-color: #ffffc0; padding: 0 5px 0 5px;"> </span></pre>
+ <pre><span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"> </span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"><a href="#-2">2</a></span>
+<span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;"> </span></pre>
</div>
</td>
<td class="code">
<tr>
<td class="linenos">
<div class="linenodiv">
- <pre><span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"> </span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;">2</span>
-<span style="color: #000000; background-color: #ffffc0; padding: 0 5px 0 5px;"> </span></pre>
+ <pre><span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"> </span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;">2</span>
+<span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;"> </span></pre>
</div>
</td>
<td class="code">
<tr>
<td class="linenos">
<div class="linenodiv">
- <pre><span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"><a href="#-8"> 8</a></span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"> </span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"><a href="#-10">10</a></span></pre>
+ <pre><span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"><a href="#-8"> 8</a></span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"> </span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"><a href="#-10">10</a></span></pre>
</div>
</td>
<td class="code">
<tr>
<td class="linenos">
<div class="linenodiv">
- <pre><span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"> 8</span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"> </span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;">10</span></pre>
+ <pre><span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"> 8</span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"> </span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;">10</span></pre>
</div>
</td>
<td class="code">
<tr>
<td class="linenos">
<div class="linenodiv">
- <pre><span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"><a href="#-8"> 8</a></span>
-<span style="color: #000000; background-color: #ffffc0; padding: 0 5px 0 5px;"> </span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"><a href="#-10">10</a></span></pre>
+ <pre><span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"><a href="#-8"> 8</a></span>
+<span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;"> </span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"><a href="#-10">10</a></span></pre>
</div>
</td>
<td class="code">
<tr>
<td class="linenos">
<div class="linenodiv">
- <pre><span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;"> 8</span>
-<span style="color: #000000; background-color: #ffffc0; padding: 0 5px 0 5px;"> </span>
-<span style="color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px;">10</span></pre>
+ <pre><span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;"> 8</span>
+<span style="color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px;"> </span>
+<span style="color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px;">10</span></pre>
</div>
</td>
<td class="code">
--- /dev/null
+# -*- coding: utf-8 -*-
+"""
+ Pygments basic API tests
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import random
+from io import StringIO, BytesIO
+from os import path
+
+import pytest
+
+from pygments import lexers, formatters, lex, format
+
+TESTDIR = path.dirname(path.abspath(__file__))
+
+def _guess_lexer_for_file(filename):
+ return lexers.guess_lexer(open(path.join(TESTDIR, 'examplefiles', filename), 'r', encoding='utf-8').read())
+
+@pytest.mark.skip(reason="This is identified as T-SQL")
+def test_guess_lexer_fsharp():
+ l = _guess_lexer_for_file('Deflate.rs')
+ assert l.__class__.__name__ == 'FSharpLexer'
+
+def test_guess_lexer_brainfuck():
+ l = lexers.guess_lexer(">>[-]<<[->>+<<]")
+ assert l.__class__.__name__ == 'BrainfuckLexer'
+
+def test_guess_lexer_singularity():
+ l = _guess_lexer_for_file('Singularity')
+ assert l.__class__.__name__ == 'SingularityLexer'
+
+@pytest.mark.skip(reason="This is identified as MIME")
+def test_guess_lexer_matlab():
+ l = lexers.guess_lexer(r'A \ B')
+ assert l.__class__.__name__ == 'OctaveLexer'
+
+@pytest.mark.skip(reason="This is identified as Python")
+def test_guess_lexer_hybris():
+ l = _guess_lexer_for_file('hybris_File.hy')
+ assert l.__class__.__name__ == 'HybrisLexer'
+
+def test_guess_lexer_forth():
+ l = _guess_lexer_for_file('demo.frt')
+ assert l.__class__.__name__ == 'ForthLexer'
+
+def test_guess_lexer_modula2():
+ l = _guess_lexer_for_file('modula2_test_cases.def')
+ assert l.__class__.__name__ == 'Modula2Lexer'
+
+def test_guess_lexer_unicon():
+ l = _guess_lexer_for_file('example.icn')
+ assert l.__class__.__name__ == 'UcodeLexer'
+
+def test_guess_lexer_ezhil():
+ l = _guess_lexer_for_file('ezhil_primefactors.n')
+ assert l.__class__.__name__ == 'EzhilLexer'
\ No newline at end of file
assert style_defs[1] == (
'td.linenos pre '
- '{ color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px; }'
+ '{ color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px; }'
)
assert style_defs[2] == (
'span.linenos '
- '{ color: #000000; background-color: #f0f0f0; padding: 0 5px 0 5px; }'
+ '{ color: #000000; background-color: #f0f0f0; padding-left: 5px; padding-right: 5px; }'
)
assert style_defs[1] == (
'td.linenos pre '
- '{ color: #ff0000; background-color: #0000ff; padding: 0 5px 0 5px; }'
+ '{ color: #ff0000; background-color: #0000ff; padding-left: 5px; padding-right: 5px; }'
)
assert style_defs[2] == (
'span.linenos '
- '{ color: #ff0000; background-color: #0000ff; padding: 0 5px 0 5px; }'
+ '{ color: #ff0000; background-color: #0000ff; padding-left: 5px; padding-right: 5px; }'
)
assert style_defs[3] == (
'td.linenos pre.special '
- '{ color: #00ff00; background-color: #ffffff; padding: 0 5px 0 5px; }'
+ '{ color: #00ff00; background-color: #ffffff; padding-left: 5px; padding-right: 5px; }'
)
assert style_defs[4] == (
'span.linenos.special '
- '{ color: #00ff00; background-color: #ffffff; padding: 0 5px 0 5px; }'
+ '{ color: #00ff00; background-color: #ffffff; padding-left: 5px; padding-right: 5px; }'
)
import os
import tempfile
from os import path
+from textwrap import dedent
import pytest
from pygments.formatters import LatexFormatter
-from pygments.lexers import PythonLexer
+from pygments.formatters.latex import LatexEmbeddedLexer
+from pygments.lexers import PythonLexer, PythonConsoleLexer
+from pygments.token import Token
TESTDIR = path.dirname(path.abspath(__file__))
TESTFILE = path.join(TESTDIR, 'test_latex_formatter.py')
os.unlink(pathname)
os.chdir(old_wd)
+
+
+def test_embedded_lexer():
+ # Latex surrounded by '|' should be Escaped
+ lexer = LatexEmbeddedLexer('|', '|', PythonConsoleLexer())
+
+ # similar to gh-1516
+ src = dedent("""\
+ >>> x = 1
+ >>> y = mul(x, |$z^2$|) # these |pipes| are untouched
+ >>> y
+ |$1 + z^2$|""")
+
+ assert list(lexer.get_tokens(src)) == [(Token.Name, ''),
+ (Token.Generic.Prompt, '>>> '),
+ (Token.Name, 'x'),
+ (Token.Text, ' '),
+ (Token.Operator, '='),
+ (Token.Text, ' '),
+ (Token.Literal.Number.Integer, '1'),
+ (Token.Text, '\n'),
+ (Token.Generic.Prompt, '>>> '),
+ (Token.Text, ''),
+ (Token.Name, 'y'),
+ (Token.Text, ' '),
+ (Token.Operator, '='),
+ (Token.Text, ' '),
+ (Token.Name, 'mul'),
+ (Token.Punctuation, '('),
+ (Token.Name, 'x'),
+ (Token.Punctuation, ','),
+ (Token.Text, ' '),
+ (Token.Escape, '$z^2$'),
+ (Token.Text, ''),
+ (Token.Punctuation, ')'),
+ (Token.Text, ' '),
+ (Token.Comment.Single, '# these |pipes| are untouched'), # note: not Token.Escape
+ (Token.Text, '\n'),
+ (Token.Generic.Prompt, '>>> '),
+ (Token.Text, ''),
+ (Token.Name, 'y'),
+ (Token.Text, '\n'),
+ (Token.Escape, '$1 + z^2$'),
+ (Token.Text, ''),
+ (Token.Generic.Output, '\n'),
+ ]
\ No newline at end of file
--- /dev/null
+# -*- coding: utf-8 -*-
+"""
+    Typographic Number Theory tests
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import pytest
+
+from pygments.lexers.tnt import TNTLexer
+from pygments.token import Text, Comment, Operator, Keyword, Name, Number, \
+ Punctuation, Error
+
+@pytest.fixture(autouse=True)
+def lexer():
+ yield TNTLexer()
+
+# whitespace
+
+@pytest.mark.parametrize('text', (' a', ' \t0', '\n\n 3'))
+def test_whitespace_positive_matches(lexer, text):
+ """Test fragments that should be tokenized as whitespace text."""
+ assert lexer.whitespace(0, text) == len(text) - 1
+ assert lexer.whitespace(0, text, True) == len(text) - 1
+ assert lexer.cur[-1] == (0, Text, text[:-1])
+
+@pytest.mark.parametrize('text', ('0 a=b premise', 'b=a symmetry'))
+def test_whitespace_negative_matches(lexer, text):
+ """Test statements that do not start with whitespace text."""
+ assert lexer.whitespace(0, text) == 0
+ with pytest.raises(AssertionError):
+ lexer.whitespace(0, text, True)
+ assert not lexer.cur
+
+# terms that can go on either side of an = sign
+
+@pytest.mark.parametrize('text', ('a ', "a' ", 'b ', "c' "))
+def test_variable_positive_matches(lexer, text):
+ """Test fragments that should be tokenized as variables."""
+ assert lexer.variable(0, text) == len(text) - 1
+ assert lexer.cur[-1] == (0, Name.Variable, text[:-1])
+
+@pytest.mark.parametrize('text', ("' ", 'f ', "f' "))
+def test_variable_negative_matches(lexer, text):
+ """Test fragments that should **not** be tokenized as variables."""
+ with pytest.raises(AssertionError):
+ lexer.variable(0, text)
+ assert not lexer.cur
+
+@pytest.mark.parametrize('text', ('0', 'S0', 'SSSSS0'))
+def test_numeral_positive_matches(lexer, text):
+ """Test fragments that should be tokenized as (unary) numerals."""
+ assert lexer.term(0, text) == len(text)
+ assert lexer.cur[-1] == (len(text) - 1, Number.Integer, text[-1])
+ if text != '0':
+ assert lexer.cur[-2] == (0, Number.Integer, text[:-1])
+
+@pytest.mark.parametrize('text', (
+ '(a+b)', '(b.a)', '(c+d)'
+))
+def test_multiterm_positive_matches(lexer, text):
+ """Test fragments that should be tokenized as a compound term."""
+ assert lexer.term(0, text) == len(text)
+ assert [t[1] for t in lexer.cur] == [
+ Punctuation, Name.Variable, Operator,
+ Name.Variable, Punctuation
+ ]
+
+@pytest.mark.parametrize('text', ('1', '=', 'A'))
+def test_term_negative_matches(lexer, text):
+ """Test fragments that should not be tokenized as terms at all."""
+ with pytest.raises(AssertionError):
+ lexer.term(0, text)
+ assert not lexer.cur
+
+# full statements, minus rule
+
+@pytest.mark.parametrize('text', ('~a=b ', '~~~~a=b '))
+def test_negator_positive_matches(lexer, text):
+ """Test statements that start with a negation."""
+ assert lexer.formula(0, text) == len(text) - 1
+ assert lexer.cur[0] == (0, Operator, text[:-4])
+
+@pytest.mark.parametrize('text', ('Aa:a=b ', 'Eb:a=b '))
+def test_quantifier_positive_matches(lexer, text):
+ """Test statements that start with a quantifier."""
+ assert lexer.formula(0, text) == len(text) - 1
+ assert lexer.cur[0][1] == Keyword.Declaration
+ assert lexer.cur[1][1] == Name.Variable
+ assert lexer.cur[2] == (2, Punctuation, ':')
+
+@pytest.mark.parametrize('text', ('Aaa=b', 'Eba=b'))
+def test_quantifier_negative_matches(lexer, text):
+ """Test quantifiers that are only partially valid."""
+ with pytest.raises(AssertionError):
+ lexer.formula(0, text)
+ # leftovers should still be valid
+ assert lexer.cur[0][1] == Keyword.Declaration
+ assert lexer.cur[1][1] == Name.Variable
+
+@pytest.mark.parametrize('text', ('<a=b&b=a>', '<a=b|b=a>', '<a=b]b=a>'))
+def test_compound_positive_matches(lexer, text):
+ """Test statements that consist of multiple formulas compounded."""
+ assert lexer.formula(0, text) == len(text)
+ assert lexer.cur[0] == (0, Punctuation, '<')
+ assert lexer.cur[4][1] == Operator
+ assert lexer.cur[-1] == (len(text)-1, Punctuation, '>')
+
+@pytest.mark.parametrize('text', ('<a=b/b=a>', '<a=b&b=a '))
+def test_compound_negative_matches(lexer, text):
+ """Test statements that look like compounds but are invalid."""
+ with pytest.raises(AssertionError):
+ lexer.formula(0, text)
+ assert lexer.cur[0] == (0, Punctuation, '<')
+
+@pytest.mark.parametrize('text', ('a=b ', 'a=0 ', '0=b '))
+def test_formula_postive_matches(lexer, text):
+ """Test the normal singular formula."""
+ assert lexer.formula(0, text) == len(text) - 1
+ assert lexer.cur[0][2] == text[0]
+ assert lexer.cur[1] == (1, Operator, '=')
+ assert lexer.cur[2][2] == text[2]
+
+@pytest.mark.parametrize('text', ('a/b', '0+0 '))
+def test_formula_negative_matches(lexer, text):
+ """Test anything but an equals sign."""
+ with pytest.raises(AssertionError):
+ lexer.formula(0, text)
+
+# rules themselves
+
+@pytest.mark.parametrize('text', (
+ 'fantasy rule', 'carry over line 5', 'premise', 'joining',
+ 'double-tilde', 'switcheroo', 'De Morgan', 'specification'
+))
+def test_rule_positive_matches(lexer, text):
+ """Test some valid rules of TNT."""
+ assert lexer.rule(0, text) == len(text)
+ assert lexer.cur[0][:2] == (0, Keyword)
+ if text[-1].isdigit():
+ assert lexer.cur[1][1] == Number.Integer
+
+@pytest.mark.parametrize('text', (
+ 'fantasy', 'carry over', 'premse', 'unjoining',
+ 'triple-tilde', 'switcheru', 'De-Morgan', 'despecification'
+))
+def test_rule_negative_matches(lexer, text):
+ """Test some invalid rules of TNT."""
+ with pytest.raises(AssertionError):
+ lexer.rule(0, text)
+
+# referrals
+
+@pytest.mark.parametrize('text', ('(lines 1, 2, and 4)', '(line 3,5,6)', '(lines 1, 6 and 0)'))
+def test_lineno_positive_matches(lexer, text):
+ """Test line referrals."""
+ assert lexer.lineno(0, text) == len(text)
+ assert lexer.cur[0] == (0, Punctuation, '(')
+ assert lexer.cur[1][:2] == (1, Text)
+ assert lexer.cur[2][1] == Number.Integer
+ assert lexer.cur[3] == (len(text)-1, Punctuation, ')')
+
+@pytest.mark.parametrize('text', (
+ '(lines one, two, and four)1 ', # to avoid IndexError
+ '(lines 1 2 and 3)', '(lines 1 2 3)'
+))
+def test_lineno_negative_matches(lexer, text):
+ """Test invalid line referrals."""
+ with pytest.raises(AssertionError):
+ lexer.lineno(0, text)
+
+# worst-case: error text
+
+@pytest.mark.parametrize('text', ('asdf', 'fdsa\nasdf', 'asdf\n '))
+def test_error_till_line_end(lexer, text):
+ try:
+ nl = text.index('\n')
+ except ValueError:
+ nl = len(text)
+ try:
+ end = text.find(text.split(None, 2)[1])
+ except IndexError: # split failed
+ end = len(text)
+ assert lexer.error_till_line_end(0, text) == end
+ assert lexer.cur[0] == (0, Error, text[:nl])
+
+# full statement, including rule (because this can't be tested any other way)
+
+@pytest.mark.parametrize('text', ('[ push', '] pop'))
+def test_fantasy_positive_matches(lexer, text):
+ """Test statements that should be tokenized as push/pop statements."""
+ assert lexer.get_tokens_unprocessed(text)[0] == (0, Keyword, text[0])
+
+# full text is already done by examplefiles, but here's some exceptions
+
+@pytest.mark.parametrize('text', (
+ '0', 'a=b', 'premise',
+ '0 a=b premise', '1 b=a symmetry (line 0)'
+))
+def test_no_crashing(lexer, text):
+ """Test incomplete text fragments that shouldn't crash the whole lexer."""
+ assert lexer.get_tokens(text)
\ No newline at end of file
+++ /dev/null
-[tox]
-envlist = py35, py36, py37, py38, pypy3
-
-[testenv]
-deps =
- pytest
- pytest-cov
-commands = py.test {posargs}