From: JinWang An Date: Tue, 5 Jan 2021 03:19:58 +0000 (+0900) Subject: Imported Upstream version 2.5.1 X-Git-Tag: upstream/2.5.1^0 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=b88425f4af3c9b374a068de0e2a63e149b4b8d3b;p=platform%2Fupstream%2Fpython3-pygments.git Imported Upstream version 2.5.1 --- diff --git a/.hgignore b/.hgignore deleted file mode 100644 index b564df8..0000000 --- a/.hgignore +++ /dev/null @@ -1,20 +0,0 @@ -syntax: glob -*.egg -*.pyc -*.pyo -.*.sw[op] -.idea/ -.ropeproject -.project -.tags -.tox -.cache/ -Pygments.egg-info/* -TAGS -build/* -dist/* -doc/_build -TAGS -tests/.coverage -tests/cover -tests/examplefiles/output diff --git a/.hgtags b/.hgtags deleted file mode 100644 index 27ce430..0000000 --- a/.hgtags +++ /dev/null @@ -1,36 +0,0 @@ -634420aa4221cc1eb2b3753bd571166bd9e611d4 0.9 -942ecbb5c84ca5d57ae82f5697775973f4e12717 0.10 -63632d0340958d891176db20fe9a32a56abcd5ea 0.11 -13834ec94d2c5a90a68bc2c2a327abd962c486bc 0.11.1 -a5748745272afffd725570e068a560d46e28dc1f 1.0 -5a794a620dc711a219722a7af94d9d2e95cda26d 1.1 -dd81c35efd95292de4965153c66c8bbfe435f1c4 1.1.1 -e7691aa4f473a2cdaa2e5b7bfed8aec196719aca 0.5.1 -6f53364d63ddb8bd9532bb6ea402e3af05275b03 0.5 -11efe99c11e601071c3a77910b9fca769de66fbf 0.6 -99df0a7404d168b05626ffced6fd16edcf58c145 0.7 -d0b08fd569d3d9dafec4c045a7d8876442b3ef64 0.7.1 -1054522d1dda9c7899516ead3e65e5e363fdf30d 0.8 -066e56d8f5caa31e15386fff6f938bedd85a8732 0.8.1 -bae0833cae75e5a641abe3c4b430fa384cd9d258 1.2 -f6e5acee4f761696676e05a9112c91a5a5670b49 1.2.1 -580c5ce755486bc92c79c50f80cfc79924e15140 1.2.2 -c62867700c9e98cc2988c62f298ec54cee9b6927 1.3 -3a3846c2503db85bb70a243c8bc702629c4bce57 1.3.1 -8ad6d35dd2ab0530a1e2c088ab7fe0e00426b5f9 1.4 -eff3aee4abff2b72564ddfde77fcc82adbba52ad 1.5 -2c262bfc66b05a8aecc1109c3acc5b9447a5213c 1.6rc1 -7c962dcb484cb73394aec7f41709940340dc8a9c 1.6 -da509a68ea620bbb8ee3f5d5cf7761375d8f4451 2.0rc1 -ed3206a773e9cb90a0edeabee8ef6b56b5b9a53c 2.0 -94e1e056c92d97e3a54759f9216e8deff22efbdd 2.0.1 -142a870bf0f1822414649ae26f433b112a5c92d5 2.0.2 -34530db252d35d7ef57a8dbb9fce7bcc46f6ba6b 2.1 -2935c3a59672e8ae74ffb7ea66ea6567f49782f6 2.1.1 -8e7ebc56153cf899067333bff4f15ae98758a2e1 2.1.2 -88527db663dce0729c2cd6e3bc2f3c657ae39254 2.1.3 -cc861d7ba005b8bc91829210e7c4ee0eb6580434 2.2.0 -9c8cab961cde0a179b29e2de90946bf720a463f6 2.3.0 -f3f550c25fe0caba1cb725dd1994363c003d97f2 2.3.1 -88f01f09ab5e2e4689ac0daa287843e3820774fa 2.4.0 -d13e2dc5bb057b45c7998b68188ed57e63e23a73 2.4.1 diff --git a/CHANGES b/CHANGES index 6822bf9..5b19155 100644 --- a/CHANGES +++ b/CHANGES @@ -1,10 +1,82 @@ Pygments changelog ================== -Issue numbers refer to the tracker at -<https://bitbucket.org/birkenfeld/pygments-main/issues>, +Since 2.5.0, issue numbers refer to the tracker at +<https://github.com/pygments/pygments/issues>, pull request numbers to the requests at -<https://bitbucket.org/birkenfeld/pygments-main/pull-requests>. +<https://github.com/pygments/pygments/pulls>. + +Version 2.5.1 +------------- +(released November 26, 2019) + +- This release fixes a packaging issue. No functional changes.
+ +Version 2.5.0 +------------- +(released November 26, 2019) + +- Added lexers: + + * Email (PR#1246) + * Erlang, Elixir shells (PR#823, #1521) + * Notmuch (PR#1264) + * `Scdoc <https://git.sr.ht/~sircmpwn/scdoc>`_ (PR#1268) + * `Solidity <https://solidity.readthedocs.io/>`_ (#1214) + * `Zeek <https://www.zeek.org>`_ (new name for Bro) (PR#1269) + * `Zig <https://ziglang.org/>`_ (PR#820) + +- Updated lexers: + + * Apache2 Configuration (PR#1251) + * Bash sessions (#1253) + * CSound (PR#1250) + * Dart + * Dockerfile + * Emacs Lisp + * Handlebars (PR#773) + * Java (#1101, #987) + * Logtalk (PR#1261) + * Matlab (PR#1271) + * Praat (PR#1277) + * Python3 (PR#1255) + * Ruby + * YAML (#1528) + * Velocity + +- Added styles: + + * Inkpot (PR#1276) + +- The ``PythonLexer`` class is now an alias for the former ``Python3Lexer``. + The old ``PythonLexer`` is available as ``Python2Lexer``. The same change has + been made for the ``PythonTracebackLexer``. The ``python3`` option for + the ``PythonConsoleLexer`` is now true by default. + +- Bump ``NasmLexer`` priority over ``TasmLexer`` for ``.asm`` files + (fixes #1326) +- Default font in the ``ImageFormatter`` has been updated (#928, PR#1245) +- Test suite switched to py.test, removed nose dependency (#1490) +- Reduce ``TeraTerm`` lexer score -- it used to match nearly all languages + (#1256) +- Treat ``Skylark``/``Starlark`` files as Python files (PR#1259) +- Image formatter: actually respect ``line_number_separator`` option + +- Add LICENSE file to wheel builds +- Agda: fix lambda highlighting +- Dart: support ``@`` annotations +- Dockerfile: accept ``FROM ... AS`` syntax +- Emacs Lisp: add more string functions +- GAS: accept registers in directive arguments +- Java: make structural punctuation (braces, parens, colon, comma) ``Punctuation``, not ``Operator`` (#987) +- Java: support ``var`` contextual keyword (#1101) +- Matlab: Fix recognition of ``function`` keyword (PR#1271) +- Python: recognize ``.jy`` filenames (#976) +- Python: recognize ``f`` string prefix (#1156) +- Ruby: support squiggly heredocs +- Shell sessions: recognize Virtualenv prompt (PR#1266) +- Velocity: support silent reference syntax + Version 2.4.2 ------------- @@ -82,6 +154,7 @@ Version 2.4.0 - Updated Trove classifiers and ``pip`` requirements (PR#799) + Version 2.3.1 ------------- (released Dec 16, 2018) diff --git a/Makefile b/Makefile index 2fcb832..61ee393 100644 --- a/Makefile +++ b/Makefile @@ -49,14 +49,16 @@ pylint: reindent: @$(PYTHON) scripts/reindent.py -r -B . +TEST = tests + test: - @$(PYTHON) tests/run.py -d $(TEST) + @$(PYTHON) `which py.test` $(TEST) test-coverage: - @$(PYTHON) tests/run.py -d --with-coverage --cover-package=pygments --cover-erase $(TEST) + @$(PYTHON) `which py.test` --cov --cov-report=html --cov-report=term $(TEST) test-examplefiles: - nosetests tests/test_examplefiles.py + @$(PYTHON) `which py.test` tests.test_examplefiles tox-test: @tox -- $(TEST) diff --git a/PKG-INFO b/PKG-INFO index 17dbe83..620cb36 100644 --- a/PKG-INFO +++ b/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.2 Name: Pygments -Version: 2.4.2 +Version: 2.5.1 Summary: Pygments is a syntax highlighting package written in Python.
Home-page: http://pygments.org/ Author: Georg Brandl @@ -39,6 +39,9 @@ Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy Classifier: Operating System :: OS Independent Classifier: Topic :: Text Processing :: Filters Classifier: Topic :: Utilities diff --git a/Pygments.egg-info/PKG-INFO b/Pygments.egg-info/PKG-INFO index 17dbe83..620cb36 100644 --- a/Pygments.egg-info/PKG-INFO +++ b/Pygments.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.2 Name: Pygments -Version: 2.4.2 +Version: 2.5.1 Summary: Pygments is a syntax highlighting package written in Python. Home-page: http://pygments.org/ Author: Georg Brandl @@ -39,6 +39,9 @@ Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy Classifier: Operating System :: OS Independent Classifier: Topic :: Text Processing :: Filters Classifier: Topic :: Utilities diff --git a/Pygments.egg-info/SOURCES.txt b/Pygments.egg-info/SOURCES.txt index 965e419..a5ffa47 100644 --- a/Pygments.egg-info/SOURCES.txt +++ b/Pygments.egg-info/SOURCES.txt @@ -1,5 +1,3 @@ -.hgignore -.hgtags AUTHORS CHANGES LICENSE @@ -7,12 +5,8 @@ MANIFEST.in Makefile README.rst TODO -bitbucket-pipelines.yml -pygmentize -requirements.txt setup.cfg setup.py -tox.ini Pygments.egg-info/PKG-INFO Pygments.egg-info/SOURCES.txt Pygments.egg-info/dependency_links.txt @@ -27,10 +21,122 @@ doc/index.rst doc/languages.rst doc/make.bat doc/pygmentize.1 +doc/_build/doctrees/download.doctree +doc/_build/doctrees/environment.pickle +doc/_build/doctrees/faq.doctree +doc/_build/doctrees/index.doctree +doc/_build/doctrees/languages.doctree +doc/_build/doctrees/docs/api.doctree +doc/_build/doctrees/docs/authors.doctree +doc/_build/doctrees/docs/changelog.doctree +doc/_build/doctrees/docs/cmdline.doctree +doc/_build/doctrees/docs/filterdevelopment.doctree +doc/_build/doctrees/docs/filters.doctree +doc/_build/doctrees/docs/formatterdevelopment.doctree +doc/_build/doctrees/docs/formatters.doctree +doc/_build/doctrees/docs/index.doctree +doc/_build/doctrees/docs/integrate.doctree +doc/_build/doctrees/docs/java.doctree +doc/_build/doctrees/docs/lexerdevelopment.doctree +doc/_build/doctrees/docs/lexers.doctree +doc/_build/doctrees/docs/moinmoin.doctree +doc/_build/doctrees/docs/plugins.doctree +doc/_build/doctrees/docs/quickstart.doctree +doc/_build/doctrees/docs/rstdirective.doctree +doc/_build/doctrees/docs/styles.doctree +doc/_build/doctrees/docs/tokens.doctree +doc/_build/doctrees/docs/unicode.doctree +doc/_build/html/.buildinfo +doc/_build/html/download.html +doc/_build/html/faq.html +doc/_build/html/genindex.html +doc/_build/html/index.html +doc/_build/html/languages.html +doc/_build/html/objects.inv +doc/_build/html/py-modindex.html +doc/_build/html/search.html +doc/_build/html/searchindex.js +doc/_build/html/_sources/download.rst.txt +doc/_build/html/_sources/faq.rst.txt +doc/_build/html/_sources/index.rst.txt 
+doc/_build/html/_sources/languages.rst.txt +doc/_build/html/_sources/docs/api.rst.txt +doc/_build/html/_sources/docs/authors.rst.txt +doc/_build/html/_sources/docs/changelog.rst.txt +doc/_build/html/_sources/docs/cmdline.rst.txt +doc/_build/html/_sources/docs/filterdevelopment.rst.txt +doc/_build/html/_sources/docs/filters.rst.txt +doc/_build/html/_sources/docs/formatterdevelopment.rst.txt +doc/_build/html/_sources/docs/formatters.rst.txt +doc/_build/html/_sources/docs/index.rst.txt +doc/_build/html/_sources/docs/integrate.rst.txt +doc/_build/html/_sources/docs/java.rst.txt +doc/_build/html/_sources/docs/lexerdevelopment.rst.txt +doc/_build/html/_sources/docs/lexers.rst.txt +doc/_build/html/_sources/docs/moinmoin.rst.txt +doc/_build/html/_sources/docs/plugins.rst.txt +doc/_build/html/_sources/docs/quickstart.rst.txt +doc/_build/html/_sources/docs/rstdirective.rst.txt +doc/_build/html/_sources/docs/styles.rst.txt +doc/_build/html/_sources/docs/tokens.rst.txt +doc/_build/html/_sources/docs/unicode.rst.txt +doc/_build/html/_static/basic.css +doc/_build/html/_static/bodybg.png +doc/_build/html/_static/demo.css +doc/_build/html/_static/demo.js +doc/_build/html/_static/docbg.png +doc/_build/html/_static/doctools.js +doc/_build/html/_static/documentation_options.js +doc/_build/html/_static/favicon.ico +doc/_build/html/_static/file.png +doc/_build/html/_static/github.png +doc/_build/html/_static/jquery-3.4.1.js +doc/_build/html/_static/jquery.js +doc/_build/html/_static/language_data.js +doc/_build/html/_static/listitem.png +doc/_build/html/_static/logo.png +doc/_build/html/_static/logo_new.png +doc/_build/html/_static/logo_only.png +doc/_build/html/_static/minus.png +doc/_build/html/_static/plus.png +doc/_build/html/_static/pocoo.png +doc/_build/html/_static/pygments.css +doc/_build/html/_static/pygments14.css +doc/_build/html/_static/searchtools.js +doc/_build/html/_static/spinner.gif +doc/_build/html/_static/underscore-1.3.1.js +doc/_build/html/_static/underscore.js +doc/_build/html/docs/api.html +doc/_build/html/docs/authors.html +doc/_build/html/docs/changelog.html +doc/_build/html/docs/cmdline.html +doc/_build/html/docs/filterdevelopment.html +doc/_build/html/docs/filters.html +doc/_build/html/docs/formatterdevelopment.html +doc/_build/html/docs/formatters.html +doc/_build/html/docs/index.html +doc/_build/html/docs/integrate.html +doc/_build/html/docs/java.html +doc/_build/html/docs/lexerdevelopment.html +doc/_build/html/docs/lexers.html +doc/_build/html/docs/moinmoin.html +doc/_build/html/docs/plugins.html +doc/_build/html/docs/quickstart.html +doc/_build/html/docs/rstdirective.html +doc/_build/html/docs/styles.html +doc/_build/html/docs/tokens.html +doc/_build/html/docs/unicode.html +doc/_static/demo.css +doc/_static/demo.js doc/_static/favicon.ico +doc/_static/github.png doc/_static/logo_new.png doc/_static/logo_only.png +doc/_static/spinner.gif +doc/_templates/demo.html +doc/_templates/demo_sidebar.html doc/_templates/docssidebar.html +doc/_templates/index_with_try.html doc/_templates/indexsidebar.html doc/_themes/pygments14/layout.html doc/_themes/pygments14/theme.conf @@ -67,6 +173,7 @@ external/moin-parser.py external/pygments.bashcomp external/rst-directive.py pygments/__init__.py +pygments/__main__.py pygments/cmdline.py pygments/console.py pygments/filter.py @@ -147,6 +254,7 @@ pygments/lexers/dylan.py pygments/lexers/ecl.py pygments/lexers/eiffel.py pygments/lexers/elm.py +pygments/lexers/email.py pygments/lexers/erlang.py pygments/lexers/esoteric.py pygments/lexers/ezhil.py @@ 
-183,6 +291,7 @@ pygments/lexers/make.py pygments/lexers/markup.py pygments/lexers/math.py pygments/lexers/matlab.py +pygments/lexers/mime.py pygments/lexers/ml.py pygments/lexers/modeling.py pygments/lexers/modula2.py @@ -216,6 +325,7 @@ pygments/lexers/robotframework.py pygments/lexers/ruby.py pygments/lexers/rust.py pygments/lexers/sas.py +pygments/lexers/scdoc.py pygments/lexers/scripting.py pygments/lexers/sgf.py pygments/lexers/shell.py @@ -223,6 +333,7 @@ pygments/lexers/slash.py pygments/lexers/smalltalk.py pygments/lexers/smv.py pygments/lexers/snobol.py +pygments/lexers/solidity.py pygments/lexers/special.py pygments/lexers/sql.py pygments/lexers/stata.py @@ -246,6 +357,7 @@ pygments/lexers/webmisc.py pygments/lexers/whiley.py pygments/lexers/x10.py pygments/lexers/xorg.py +pygments/lexers/zig.py pygments/styles/__init__.py pygments/styles/abap.py pygments/styles/algol.py @@ -260,6 +372,7 @@ pygments/styles/emacs.py pygments/styles/friendly.py pygments/styles/fruity.py pygments/styles/igor.py +pygments/styles/inkpot.py pygments/styles/lovelace.py pygments/styles/manni.py pygments/styles/monokai.py @@ -280,7 +393,6 @@ pygments/styles/trac.py pygments/styles/vim.py pygments/styles/vs.py pygments/styles/xcode.py -scripts/.release-checklist.swp scripts/check_sources.py scripts/debug_lexer.py scripts/detect_missing_analyse_text.py @@ -290,9 +402,8 @@ scripts/get_vimkw.py scripts/pylintrc scripts/release-checklist scripts/vim2pygments.py -tests/run.py -tests/string_asserts.py -tests/support.py +tests/__init__.py +tests/test_apache_conf.py tests/test_asm.py tests/test_basic.py tests/test_basic_api.py @@ -306,6 +417,7 @@ tests/test_csound.py tests/test_data.py tests/test_examplefiles.py tests/test_ezhil.py +tests/test_guessing.py tests/test_html_formatter.py tests/test_inherit.py tests/test_irc_formatter.py @@ -332,7 +444,6 @@ tests/test_ruby.py tests/test_shell.py tests/test_smarty.py tests/test_sql.py -tests/test_string_asserts.py tests/test_terminal_formatter.py tests/test_textfmts.py tests/test_token.py @@ -366,6 +477,7 @@ tests/examplefiles/Get-CommandDefinitionHtml.ps1 tests/examplefiles/IPDispatchC.nc tests/examplefiles/IPDispatchP.nc tests/examplefiles/Intro.java +tests/examplefiles/MIME_example.eml tests/examplefiles/Makefile tests/examplefiles/Object.st tests/examplefiles/OrderedMap.hx @@ -442,6 +554,7 @@ tests/examplefiles/example.coffee tests/examplefiles/example.cpp tests/examplefiles/example.e tests/examplefiles/example.elm +tests/examplefiles/example.eml tests/examplefiles/example.ezt tests/examplefiles/example.f90 tests/examplefiles/example.feature @@ -507,6 +620,7 @@ tests/examplefiles/example.sgf tests/examplefiles/example.sh tests/examplefiles/example.sh-session tests/examplefiles/example.shell-session +tests/examplefiles/example.shex tests/examplefiles/example.sl tests/examplefiles/example.slim tests/examplefiles/example.sls @@ -531,6 +645,7 @@ tests/examplefiles/example.xhtml tests/examplefiles/example.xtend tests/examplefiles/example.xtm tests/examplefiles/example.yaml +tests/examplefiles/example.zig tests/examplefiles/example1.cadl tests/examplefiles/example2.aspx tests/examplefiles/example2.cpp @@ -618,6 +733,7 @@ tests/examplefiles/nasm_objexe.asm tests/examplefiles/nemerle_sample.n tests/examplefiles/nginx_nginx.conf tests/examplefiles/noexcept.cpp +tests/examplefiles/notmuch_example tests/examplefiles/numbers.c tests/examplefiles/objc_example.m tests/examplefiles/openedge_example @@ -659,6 +775,7 @@ tests/examplefiles/robotframework_test.txt 
tests/examplefiles/rql-queries.rql tests/examplefiles/ruby_func_def.rb tests/examplefiles/sample.qvto +tests/examplefiles/scdoc_manual.scd tests/examplefiles/scilab.sci tests/examplefiles/scope.cirru tests/examplefiles/session.dylan-console @@ -754,6 +871,7 @@ tests/examplefiles/test.scaml tests/examplefiles/test.sco tests/examplefiles/test.shen tests/examplefiles/test.sil +tests/examplefiles/test.sol tests/examplefiles/test.ssp tests/examplefiles/test.swift tests/examplefiles/test.tcsh @@ -761,6 +879,7 @@ tests/examplefiles/test.vb tests/examplefiles/test.vhdl tests/examplefiles/test.xqy tests/examplefiles/test.xsl +tests/examplefiles/test.zeek tests/examplefiles/test.zep tests/examplefiles/test2.odin tests/examplefiles/test2.pypylog @@ -809,6 +928,7 @@ tests/examplefiles/output/Get-CommandDefinitionHtml.ps1 tests/examplefiles/output/IPDispatchC.nc tests/examplefiles/output/IPDispatchP.nc tests/examplefiles/output/Intro.java +tests/examplefiles/output/MIME_example.eml tests/examplefiles/output/Makefile tests/examplefiles/output/Object.st tests/examplefiles/output/OrderedMap.hx @@ -885,6 +1005,7 @@ tests/examplefiles/output/example.coffee tests/examplefiles/output/example.cpp tests/examplefiles/output/example.e tests/examplefiles/output/example.elm +tests/examplefiles/output/example.eml tests/examplefiles/output/example.ezt tests/examplefiles/output/example.f90 tests/examplefiles/output/example.feature @@ -950,6 +1071,7 @@ tests/examplefiles/output/example.sgf tests/examplefiles/output/example.sh tests/examplefiles/output/example.sh-session tests/examplefiles/output/example.shell-session +tests/examplefiles/output/example.shex tests/examplefiles/output/example.sl tests/examplefiles/output/example.slim tests/examplefiles/output/example.sls @@ -974,6 +1096,7 @@ tests/examplefiles/output/example.xhtml tests/examplefiles/output/example.xtend tests/examplefiles/output/example.xtm tests/examplefiles/output/example.yaml +tests/examplefiles/output/example.zig tests/examplefiles/output/example1.cadl tests/examplefiles/output/example2.aspx tests/examplefiles/output/example2.cpp diff --git a/README.rst b/README.rst index 350e242..ef0cbfd 100644 --- a/README.rst +++ b/README.rst @@ -1,19 +1,20 @@ -README for Pygments +Welcome to Pygments =================== -This is the source of Pygments. It is a generic syntax highlighter that -supports over 300 languages and text formats, for use in code hosting, forums, -wikis or other applications that need to prettify source code. +This is the source of Pygments. It is a **generic syntax highlighter** written +in Python that supports over 300 languages and text formats, for use in code +hosting, forums, wikis or other applications that need to prettify source code. Installing ---------- -... works as usual, use ``python setup.py install``. +... works as usual, use ``pip install Pygments`` to get published versions, +or ``python setup.py install`` to install from a checkout. Documentation ------------- -... can be found online at http://pygments.org/ or created by :: +... can be found online at http://pygments.org/ or created with Sphinx by :: cd doc make html @@ -21,19 +22,23 @@ Documentation Development ----------- -... takes place on `Bitbucket -<https://bitbucket.org/birkenfeld/pygments-main>`_, where the Mercurial -repository, tickets and pull requests can be viewed. +... takes place on `GitHub <https://github.com/pygments/pygments>`_, where the +Git repository, tickets and pull requests can be viewed. -Continuous testing runs on drone.io: +Continuous testing runs on GitHub workflows: -..
image:: https://drone.io/bitbucket.org/birkenfeld/pygments-main/status.png - :target: https://drone.io/bitbucket.org/birkenfeld/pygments-main +.. image:: https://github.com/pygments/pygments/workflows/Pygments/badge.svg + :target: https://github.com/pygments/pygments/actions?query=workflow%3APygments The authors ----------- -Pygments is maintained by **Georg Brandl**, e-mail address *georg*\ *@*\ *python.org*. +Pygments is maintained by **Georg Brandl**, e-mail address *georg*\ *@*\ *python.org* +and **Matthäus Chajdas**. Many lexers and fixes have been contributed by **Armin Ronacher**, the rest of the `Pocoo <http://dev.pocoo.org/>`_ team and **Tim Hatch**. + +The code is distributed under the BSD 2-clause license. Contributors making pull +requests must agree that they are able and willing to put their contributions +under that license. diff --git a/bitbucket-pipelines.yml b/bitbucket-pipelines.yml deleted file mode 100644 index 4a9f1b6..0000000 --- a/bitbucket-pipelines.yml +++ /dev/null @@ -1,34 +0,0 @@ -pipelines: - default: - - step: - name: Test on Python 2.7 - image: python:2.7 - caches: - - pip - script: - - pip install -r requirements.txt - - tox -e py27 - - step: - name: Test on Python 3.5 - image: python:3.5 - caches: - - pip - script: - - pip install -r requirements.txt - - tox -e py35 - - step: - name: Test on Python 3.6 - image: python:3.6 - caches: - - pip - script: - - pip install -r requirements.txt - - tox -e py36 - - step: - name: Test on Python 3.7 - image: python:3.7 - caches: - - pip - script: - - pip install -r requirements.txt - - tox -e py37 diff --git a/doc/_build/doctrees/docs/api.doctree b/doc/_build/doctrees/docs/api.doctree new file mode 100644 index 0000000..730fb8b Binary files /dev/null and b/doc/_build/doctrees/docs/api.doctree differ diff --git a/doc/_build/doctrees/docs/authors.doctree b/doc/_build/doctrees/docs/authors.doctree new file mode 100644 index 0000000..0d25fa8 Binary files /dev/null and b/doc/_build/doctrees/docs/authors.doctree differ diff --git a/doc/_build/doctrees/docs/changelog.doctree b/doc/_build/doctrees/docs/changelog.doctree new file mode 100644 index 0000000..6edfce7 Binary files /dev/null and b/doc/_build/doctrees/docs/changelog.doctree differ diff --git a/doc/_build/doctrees/docs/cmdline.doctree b/doc/_build/doctrees/docs/cmdline.doctree new file mode 100644 index 0000000..74544a0 Binary files /dev/null and b/doc/_build/doctrees/docs/cmdline.doctree differ diff --git a/doc/_build/doctrees/docs/filterdevelopment.doctree b/doc/_build/doctrees/docs/filterdevelopment.doctree new file mode 100644 index 0000000..36ac3ef Binary files /dev/null and b/doc/_build/doctrees/docs/filterdevelopment.doctree differ diff --git a/doc/_build/doctrees/docs/filters.doctree b/doc/_build/doctrees/docs/filters.doctree new file mode 100644 index 0000000..e407baa Binary files /dev/null and b/doc/_build/doctrees/docs/filters.doctree differ diff --git a/doc/_build/doctrees/docs/formatterdevelopment.doctree b/doc/_build/doctrees/docs/formatterdevelopment.doctree new file mode 100644 index 0000000..9feccf2 Binary files /dev/null and b/doc/_build/doctrees/docs/formatterdevelopment.doctree differ diff --git a/doc/_build/doctrees/docs/formatters.doctree b/doc/_build/doctrees/docs/formatters.doctree new file mode 100644 index 0000000..b128f6a Binary files /dev/null and b/doc/_build/doctrees/docs/formatters.doctree differ diff --git a/doc/_build/doctrees/docs/index.doctree b/doc/_build/doctrees/docs/index.doctree new file mode 100644 index 0000000..5bf88f3 Binary files /dev/null and
b/doc/_build/doctrees/docs/index.doctree differ diff --git a/doc/_build/doctrees/docs/integrate.doctree b/doc/_build/doctrees/docs/integrate.doctree new file mode 100644 index 0000000..b41e758 Binary files /dev/null and b/doc/_build/doctrees/docs/integrate.doctree differ diff --git a/doc/_build/doctrees/docs/java.doctree b/doc/_build/doctrees/docs/java.doctree new file mode 100644 index 0000000..4f61c1c Binary files /dev/null and b/doc/_build/doctrees/docs/java.doctree differ diff --git a/doc/_build/doctrees/docs/lexerdevelopment.doctree b/doc/_build/doctrees/docs/lexerdevelopment.doctree new file mode 100644 index 0000000..f7d372e Binary files /dev/null and b/doc/_build/doctrees/docs/lexerdevelopment.doctree differ diff --git a/doc/_build/doctrees/docs/lexers.doctree b/doc/_build/doctrees/docs/lexers.doctree new file mode 100644 index 0000000..ea4ab6d Binary files /dev/null and b/doc/_build/doctrees/docs/lexers.doctree differ diff --git a/doc/_build/doctrees/docs/moinmoin.doctree b/doc/_build/doctrees/docs/moinmoin.doctree new file mode 100644 index 0000000..ef2922a Binary files /dev/null and b/doc/_build/doctrees/docs/moinmoin.doctree differ diff --git a/doc/_build/doctrees/docs/plugins.doctree b/doc/_build/doctrees/docs/plugins.doctree new file mode 100644 index 0000000..c8af0ba Binary files /dev/null and b/doc/_build/doctrees/docs/plugins.doctree differ diff --git a/doc/_build/doctrees/docs/quickstart.doctree b/doc/_build/doctrees/docs/quickstart.doctree new file mode 100644 index 0000000..82ac98c Binary files /dev/null and b/doc/_build/doctrees/docs/quickstart.doctree differ diff --git a/doc/_build/doctrees/docs/rstdirective.doctree b/doc/_build/doctrees/docs/rstdirective.doctree new file mode 100644 index 0000000..1e80256 Binary files /dev/null and b/doc/_build/doctrees/docs/rstdirective.doctree differ diff --git a/doc/_build/doctrees/docs/styles.doctree b/doc/_build/doctrees/docs/styles.doctree new file mode 100644 index 0000000..3cf3380 Binary files /dev/null and b/doc/_build/doctrees/docs/styles.doctree differ diff --git a/doc/_build/doctrees/docs/tokens.doctree b/doc/_build/doctrees/docs/tokens.doctree new file mode 100644 index 0000000..6f4a1a7 Binary files /dev/null and b/doc/_build/doctrees/docs/tokens.doctree differ diff --git a/doc/_build/doctrees/docs/unicode.doctree b/doc/_build/doctrees/docs/unicode.doctree new file mode 100644 index 0000000..ec129cb Binary files /dev/null and b/doc/_build/doctrees/docs/unicode.doctree differ diff --git a/doc/_build/doctrees/download.doctree b/doc/_build/doctrees/download.doctree new file mode 100644 index 0000000..6788fc3 Binary files /dev/null and b/doc/_build/doctrees/download.doctree differ diff --git a/doc/_build/doctrees/environment.pickle b/doc/_build/doctrees/environment.pickle new file mode 100644 index 0000000..f0a0d35 Binary files /dev/null and b/doc/_build/doctrees/environment.pickle differ diff --git a/doc/_build/doctrees/faq.doctree b/doc/_build/doctrees/faq.doctree new file mode 100644 index 0000000..6f5324c Binary files /dev/null and b/doc/_build/doctrees/faq.doctree differ diff --git a/doc/_build/doctrees/index.doctree b/doc/_build/doctrees/index.doctree new file mode 100644 index 0000000..e35a485 Binary files /dev/null and b/doc/_build/doctrees/index.doctree differ diff --git a/doc/_build/doctrees/languages.doctree b/doc/_build/doctrees/languages.doctree new file mode 100644 index 0000000..1ef0561 Binary files /dev/null and b/doc/_build/doctrees/languages.doctree differ diff --git a/doc/_build/html/.buildinfo 
b/doc/_build/html/.buildinfo new file mode 100644 index 0000000..23b7cb1 --- /dev/null +++ b/doc/_build/html/.buildinfo @@ -0,0 +1,4 @@ +# Sphinx build info version 1 +# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. +config: c79bd2c7f6735b09a44f7dfcaa237099 +tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/doc/_build/html/_sources/docs/api.rst.txt b/doc/_build/html/_sources/docs/api.rst.txt new file mode 100644 index 0000000..a6b242d --- /dev/null +++ b/doc/_build/html/_sources/docs/api.rst.txt @@ -0,0 +1,354 @@ +.. -*- mode: rst -*- + +===================== +The full Pygments API +===================== + +This page describes the Pygments API. + +High-level API +============== + +.. module:: pygments + +Functions from the :mod:`pygments` module: + +.. function:: lex(code, lexer) + + Lex `code` with the `lexer` (must be a `Lexer` instance) + and return an iterable of tokens. Currently, this only calls + `lexer.get_tokens()`. + +.. function:: format(tokens, formatter, outfile=None) + + Format a token stream (iterable of tokens) `tokens` with the + `formatter` (must be a `Formatter` instance). The result is + written to `outfile`, or if that is ``None``, returned as a + string. + +.. function:: highlight(code, lexer, formatter, outfile=None) + + This is the most high-level highlighting function. + It combines `lex` and `format` in one function. + + +.. module:: pygments.lexers + +Functions from :mod:`pygments.lexers`: + +.. function:: get_lexer_by_name(alias, **options) + + Return an instance of a `Lexer` subclass that has `alias` in its + aliases list. The lexer is given the `options` at its + instantiation. + + Will raise :exc:`pygments.util.ClassNotFound` if no lexer with that alias is + found. + +.. function:: get_lexer_for_filename(fn, **options) + + Return a `Lexer` subclass instance that has a filename pattern + matching `fn`. The lexer is given the `options` at its + instantiation. + + Will raise :exc:`pygments.util.ClassNotFound` if no lexer for that filename + is found. + +.. function:: get_lexer_for_mimetype(mime, **options) + + Return a `Lexer` subclass instance that has `mime` in its mimetype + list. The lexer is given the `options` at its instantiation. + + Will raise :exc:`pygments.util.ClassNotFound` if no lexer for that mimetype + is found. + +.. function:: load_lexer_from_file(filename, lexername="CustomLexer", **options) + + Return a `Lexer` subclass instance loaded from the provided file, relative + to the current directory. The file is expected to contain a Lexer class + named `lexername` (by default, CustomLexer). Users should be very careful with + the input, because this method is equivalent to running eval on the input file. + The lexer is given the `options` at its instantiation. + + :exc:`ClassNotFound` is raised if there are any errors loading the Lexer. + + .. versionadded:: 2.2 + +.. function:: guess_lexer(text, **options) + + Return a `Lexer` subclass instance that's guessed from the text in + `text`. For that, the :meth:`.analyse_text()` method of every known lexer + class is called with the text as argument, and the lexer which returned the + highest value will be instantiated and returned. + + :exc:`pygments.util.ClassNotFound` is raised if no lexer thinks it can + handle the content. + +..
function:: guess_lexer_for_filename(filename, text, **options) + + As :func:`guess_lexer()`, but only lexers which have a pattern in `filenames` + or `alias_filenames` that matches `filename` are taken into consideration. + + :exc:`pygments.util.ClassNotFound` is raised if no lexer thinks it can + handle the content. + +.. function:: get_all_lexers() + + Return an iterable over all registered lexers, yielding tuples in the + format:: + + (longname, tuple of aliases, tuple of filename patterns, tuple of mimetypes) + + .. versionadded:: 0.6 + +.. function:: find_lexer_class_by_name(alias) + + Return the `Lexer` subclass that has `alias` in its aliases list, without + instantiating it. + + Will raise :exc:`pygments.util.ClassNotFound` if no lexer with that alias is + found. + + .. versionadded:: 2.2 + +.. function:: find_lexer_class(name) + + Return the `Lexer` subclass whose *name* attribute matches the given + *name* argument. + + +.. module:: pygments.formatters + +Functions from :mod:`pygments.formatters`: + +.. function:: get_formatter_by_name(alias, **options) + + Return an instance of a :class:`.Formatter` subclass that has `alias` in its + aliases list. The formatter is given the `options` at its instantiation. + + Will raise :exc:`pygments.util.ClassNotFound` if no formatter with that + alias is found. + +.. function:: get_formatter_for_filename(fn, **options) + + Return a :class:`.Formatter` subclass instance that has a filename pattern + matching `fn`. The formatter is given the `options` at its instantiation. + + Will raise :exc:`pygments.util.ClassNotFound` if no formatter for that filename + is found. + +.. function:: load_formatter_from_file(filename, formattername="CustomFormatter", **options) + + Return a `Formatter` subclass instance loaded from the provided file, relative + to the current directory. The file is expected to contain a Formatter class + named ``formattername`` (by default, CustomFormatter). Users should be very + careful with the input, because this method is equivalent to running eval + on the input file. The formatter is given the `options` at its instantiation. + + :exc:`ClassNotFound` is raised if there are any errors loading the Formatter. + + .. versionadded:: 2.2 + +.. module:: pygments.styles + +Functions from :mod:`pygments.styles`: + +.. function:: get_style_by_name(name) + + Return a style class by its short name. The names of the builtin styles + are listed in :data:`pygments.styles.STYLE_MAP`. + + Will raise :exc:`pygments.util.ClassNotFound` if no style of that name is + found. + +.. function:: get_all_styles() + + Return an iterable over all registered styles, yielding their names. + + .. versionadded:: 0.6 + + +.. module:: pygments.lexer + +Lexers +====== + +The base lexer class from which all lexers are derived is: + +.. class:: Lexer(**options) + + The constructor takes a \*\*keywords dictionary of options. + Every subclass must first process its own options and then call + the `Lexer` constructor, since it processes the `stripnl`, + `stripall` and `tabsize` options. + + An example looks like this: + + .. sourcecode:: python + + def __init__(self, **options): + self.compress = options.get('compress', '') + Lexer.__init__(self, **options) + + As these options must all be specifiable as strings (due to the + command line usage), there are various utility functions + available to help with that, see `Option processing`_. + + .. method:: get_tokens(text) + + This method is the basic interface of a lexer.
It is called by + the `highlight()` function. It must process the text and return an + iterable of ``(tokentype, value)`` pairs from `text`. + + Normally, you don't need to override this method. The default + implementation processes the `stripnl`, `stripall` and `tabsize` + options and then yields all tokens from `get_tokens_unprocessed()`, + with the ``index`` dropped. + + .. method:: get_tokens_unprocessed(text) + + This method should process the text and return an iterable of + ``(index, tokentype, value)`` tuples where ``index`` is the starting + position of the token within the input text. + + This method must be overridden by subclasses. + + .. staticmethod:: analyse_text(text) + + A static method which is called for lexer guessing. It should analyse + the text and return a float in the range from ``0.0`` to ``1.0``. + If it returns ``0.0``, the lexer will not be selected as the most + probable one; if it returns ``1.0``, it will be selected immediately. + + .. note:: You don't have to add ``@staticmethod`` to the definition of + this method; this will be taken care of by the Lexer's metaclass. + + For a list of known tokens, have a look at the :doc:`tokens` page. + + A lexer can also have the following attributes (in fact, they are mandatory + except `alias_filenames`) that are used by the builtin lookup mechanism. + + .. attribute:: name + + Full name for the lexer, in human-readable form. + + .. attribute:: aliases + + A list of short, unique identifiers that can be used to look up + the lexer from a list, e.g. using `get_lexer_by_name()`. + + .. attribute:: filenames + + A list of `fnmatch` patterns that match filenames which contain + content for this lexer. The patterns in this list should be unique among + all lexers. + + .. attribute:: alias_filenames + + A list of `fnmatch` patterns that match filenames which may or may not + contain content for this lexer. This list is used by the + :func:`.guess_lexer_for_filename()` function, to determine which lexers + are then included in guessing the correct one. That means that + e.g. every lexer for HTML and a template language should include + ``\*.html`` in this list. + + .. attribute:: mimetypes + + A list of MIME types for content that can be lexed with this + lexer. + + +.. module:: pygments.formatter + +Formatters +========== + +A formatter is derived from this class: + + +.. class:: Formatter(**options) + + As with lexers, this constructor processes options and then must call the + base class :meth:`__init__`. + + The :class:`Formatter` class recognizes the options `style`, `full` and + `title`. It is up to the formatter class whether it uses them. + + .. method:: get_style_defs(arg='') + + This method must return statements or declarations suitable to define + the current style for subsequent highlighted text (e.g. CSS classes + in the `HTMLFormatter`). + + The optional argument `arg` can be used to modify the generation and + is formatter dependent (it is standardized because it can be given on + the command line). + + This method is called by the ``-S`` :doc:`command-line option <cmdline>`; + the `arg` is then given by the ``-a`` option. + + .. method:: format(tokensource, outfile) + + This method must format the tokens from the `tokensource` iterable and + write the formatted version to the file object `outfile`. + + Formatter options can control how exactly the tokens are converted. + + .. versionadded:: 0.7 + A formatter must have the following attributes that are used by the + builtin lookup mechanism. + + ..
attribute:: name + + Full name for the formatter, in human-readable form. + + .. attribute:: aliases + + A list of short, unique identifiers that can be used to look up + the formatter from a list, e.g. using :func:`.get_formatter_by_name()`. + + .. attribute:: filenames + + A list of :mod:`fnmatch` patterns that match filenames for which this + formatter can produce output. The patterns in this list should be unique + among all formatters. + + +.. module:: pygments.util + +Option processing +================= + +The :mod:`pygments.util` module has some utility functions usable for option +processing: + +.. exception:: OptionError + + This exception will be raised by all option processing functions if + the type or value of the argument is not correct. + +.. function:: get_bool_opt(options, optname, default=None) + + Interpret the key `optname` from the dictionary `options` as a boolean and + return it. Return `default` if `optname` is not in `options`. + + The valid string values for ``True`` are ``1``, ``yes``, ``true`` and + ``on``, the ones for ``False`` are ``0``, ``no``, ``false`` and ``off`` + (matched case-insensitively). + +.. function:: get_int_opt(options, optname, default=None) + + As :func:`get_bool_opt`, but interpret the value as an integer. + +.. function:: get_list_opt(options, optname, default=None) + + If the key `optname` from the dictionary `options` is a string, + split it at whitespace and return it. If it is already a list + or a tuple, it is returned as a list. + +.. function:: get_choice_opt(options, optname, allowed, default=None) + + If the key `optname` from the dictionary is not in the sequence + `allowed`, raise an error; otherwise, return it. + + .. versionadded:: 0.8 diff --git a/doc/_build/html/_sources/docs/cmdline.rst.txt b/doc/_build/html/_sources/docs/cmdline.rst.txt new file mode 100644 index 0000000..e4f94ea --- /dev/null +++ b/doc/_build/html/_sources/docs/cmdline.rst.txt @@ -0,0 +1,166 @@ +.. -*- mode: rst -*- + +====================== +Command Line Interface +====================== + +You can use Pygments from the shell, provided you installed the +:program:`pygmentize` script:: + + $ pygmentize test.py + print "Hello World" + +will print the file test.py to standard output, using the Python lexer +(inferred from the file name extension) and the terminal formatter (because +you didn't give an explicit formatter name). + +If you want HTML output:: + + $ pygmentize -f html -l python -o test.html test.py + +As you can see, the ``-l`` option explicitly selects a lexer. As seen above, if you +give an input file name and it has an extension that Pygments recognizes, you can +omit this option. + +The ``-o`` option gives an output file name. If it is not given, output is +written to stdout. + +The ``-f`` option selects a formatter (as with ``-l``, it can also be omitted +if an output file name is given and has a supported extension).
+If no output file name is given and ``-f`` is omitted, the +:class:`.TerminalFormatter` is used. + +The above command could therefore also be given as:: + + $ pygmentize -o test.html test.py + +To create a full HTML document, including line numbers and stylesheet (using the +"emacs" style), highlighting the Python file ``test.py`` to ``test.html``:: + + $ pygmentize -O full,style=emacs -o test.html test.py + + +Options and filters +------------------- + +Lexer and formatter options can be given using the ``-O`` option:: + + $ pygmentize -f html -O style=colorful,linenos=1 -l python test.py + +Be sure to enclose the option string in quotes if it contains any special shell +characters, such as spaces or expansion wildcards like ``*``. If an option +expects a list value, separate the list entries with spaces (you'll have to +quote the option value in this case too, so that the shell doesn't split it). + +Since the ``-O`` option argument is split at commas and expects the split values +to be of the form ``name=value``, you can't give an option value that contains +commas or equals signs. Therefore, an option ``-P`` is provided (as of Pygments +0.9) that works like ``-O`` but can only pass one option per ``-P``. Its value +can then contain all characters:: + + $ pygmentize -P "heading=Pygments, the Python highlighter" ... + +Filters are added to the token stream using the ``-F`` option:: + + $ pygmentize -f html -l pascal -F keywordcase:case=upper main.pas + +As you see, options for the filter are given after a colon. As for ``-O``, the +filter name and options must be one shell word, so there may not be any spaces +around the colon. + + +Generating styles +----------------- + +Formatters normally don't output full style information. For example, the HTML +formatter by default only outputs ``<span>`` tags with ``class`` attributes. +Therefore, there's a special ``-S`` option for generating style definitions. +Usage is as follows:: + + $ pygmentize -f html -S colorful -a .syntax + +generates a CSS style sheet (because you selected the HTML formatter) for +the "colorful" style, prepending a ".syntax" selector to all style rules. + +For an explanation of what ``-a`` means for :doc:`a particular formatter +<formatters>`, look for the `arg` argument for the formatter's +:meth:`.get_style_defs()` method. + + +Getting lexer names +------------------- + +.. versionadded:: 1.0 + +The ``-N`` option guesses a lexer name for a given filename, so that :: + + $ pygmentize -N setup.py + +will print out ``python``. It won't highlight anything yet. If no specific +lexer is known for that filename, ``text`` is printed. + +Custom Lexers and Formatters +---------------------------- + +.. versionadded:: 2.2 + +The ``-x`` flag enables custom lexers and formatters to be loaded +from files relative to the current directory. Create a file with a class named +CustomLexer or CustomFormatter, then specify it on the command line:: + + $ pygmentize -l your_lexer.py -f your_formatter.py -x + +You can also specify the name of your class with a colon:: + + $ pygmentize -l your_lexer.py:SomeLexer -x + +For more information, see :doc:`the Pygments documentation on Lexer development +<lexerdevelopment>`. + +Getting help +------------ + +The ``-L`` option lists lexers, formatters, styles and filters, along with +their short names and supported file name extensions. If you want to see +only one category, give it as an argument:: + + $ pygmentize -L filters + +will list only the installed filters.
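+
+The same listing is also available from Python; a minimal sketch using the
+documented `get_all_lexers()` function:
+
+.. sourcecode:: python
+
+    from pygments.lexers import get_all_lexers
+
+    # each entry is a (longname, aliases, filename patterns, mimetypes) tuple
+    for longname, aliases, patterns, mimetypes in get_all_lexers():
+        print(longname, aliases, patterns)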
+ +The ``-H`` option will give you detailed information (the same that can be found +in this documentation) about a lexer, formatter or filter. Usage is as follows:: + + $ pygmentize -H formatter html + +will print the help for the HTML formatter, while :: + + $ pygmentize -H lexer python + +will print the help for the Python lexer, etc. + + +A note on encodings +------------------- + +.. versionadded:: 0.9 + +Pygments tries to be smart regarding encodings in the formatting process: + +* If you give an ``encoding`` option, it will be used as the input and + output encoding. + +* If you give an ``outencoding`` option, it will override ``encoding`` + as the output encoding. + +* If you give an ``inencoding`` option, it will override ``encoding`` + as the input encoding. + +* If you don't give an encoding and have given an output file, the default + encoding for lexer and formatter is the terminal encoding or the default + locale encoding of the system. As a last resort, ``latin1`` is used (which + will pass through all non-ASCII characters). + +* If you don't give an encoding and haven't given an output file (that means + output is written to the console), the default encoding for lexer and + formatter is the terminal encoding (``sys.stdout.encoding``). diff --git a/doc/_build/html/_sources/docs/filterdevelopment.rst.txt b/doc/_build/html/_sources/docs/filterdevelopment.rst.txt new file mode 100644 index 0000000..fbcd0a0 --- /dev/null +++ b/doc/_build/html/_sources/docs/filterdevelopment.rst.txt @@ -0,0 +1,71 @@ +.. -*- mode: rst -*- + +===================== +Write your own filter +===================== + +.. versionadded:: 0.7 + +Writing your own filters is very easy. All you have to do is subclass +the `Filter` class and override the `filter` method. Additionally, a +filter is instantiated with some keyword arguments you can use to +adjust the behavior of your filter. + + +Subclassing Filters +=================== + +As an example, we write a filter that converts all `Name.Function` tokens +to normal `Name` tokens to make the output less colorful. + +.. sourcecode:: python + + from pygments.util import get_bool_opt + from pygments.token import Name + from pygments.filter import Filter + + class UncolorFilter(Filter): + + def __init__(self, **options): + Filter.__init__(self, **options) + self.class_too = get_bool_opt(options, 'classtoo') + + def filter(self, lexer, stream): + for ttype, value in stream: + if ttype is Name.Function or (self.class_too and + ttype is Name.Class): + ttype = Name + yield ttype, value + +Some notes on the `lexer` argument: it can be quite confusing since it doesn't +need to be a lexer instance. If a filter was added by using the `add_filter()` +function of lexers, that lexer is registered for the filter. In that case +`lexer` will refer to the lexer that has registered the filter. It *can* be used +to access options passed to a lexer. Because it could be `None`, you always have +to check for that case if you access it. + + +Using a decorator +================= + +You can also use the `simplefilter` decorator from the `pygments.filter` module: + +..
sourcecode:: python + + from pygments.util import get_bool_opt + from pygments.token import Name + from pygments.filter import simplefilter + + + @simplefilter + def uncolor(self, lexer, stream, options): + class_too = get_bool_opt(options, 'classtoo') + for ttype, value in stream: + if ttype is Name.Function or (class_too and + ttype is Name.Class): + ttype = Name + yield ttype, value + +The decorator automatically subclasses an internal filter class and uses the +decorated function as a method for filtering. (That's why there is a `self` +argument that you probably won't end up using in the method.) diff --git a/doc/_build/html/_sources/docs/filters.rst.txt b/doc/_build/html/_sources/docs/filters.rst.txt new file mode 100644 index 0000000..ff2519a --- /dev/null +++ b/doc/_build/html/_sources/docs/filters.rst.txt @@ -0,0 +1,41 @@ +.. -*- mode: rst -*- + +======= +Filters +======= + +.. versionadded:: 0.7 + +You can filter token streams coming from lexers to improve or annotate the +output. For example, you can highlight special words in comments, convert +keywords to upper or lowercase to enforce a style guide, etc. + +To apply a filter, you can use the `add_filter()` method of a lexer: + +.. sourcecode:: pycon + + >>> from pygments.lexers import PythonLexer + >>> l = PythonLexer() + >>> # add a filter given by a string and options + >>> l.add_filter('codetagify', case='lower') + >>> l.filters + [<pygments.filters.CodeTagFilter object at 0xb785decc>] + >>> from pygments.filters import KeywordCaseFilter + >>> # or give an instance + >>> l.add_filter(KeywordCaseFilter(case='lower')) + +The `add_filter()` method takes keyword arguments which are forwarded to +the constructor of the filter. + +To get a list of all registered filters by name, you can use the +`get_all_filters()` function from the `pygments.filters` module, which returns an +iterable of all known filters. + +If you want to write your own filter, have a look at :doc:`Write your own filter +<filterdevelopment>`. + + +Builtin Filters +=============== + +.. pygmentsdoc:: filters diff --git a/doc/_build/html/_sources/docs/formatterdevelopment.rst.txt b/doc/_build/html/_sources/docs/formatterdevelopment.rst.txt new file mode 100644 index 0000000..2bfac05 --- /dev/null +++ b/doc/_build/html/_sources/docs/formatterdevelopment.rst.txt @@ -0,0 +1,169 @@ +.. -*- mode: rst -*- + +======================== +Write your own formatter +======================== + +As well as creating :doc:`your own lexer <lexerdevelopment>`, writing a new +formatter for Pygments is easy and straightforward. + +A formatter is a class that is initialized with some keyword arguments (the +formatter options) and that must provide a `format()` method. +Additionally, a formatter should provide a `get_style_defs()` method that +returns the style definitions from the style in a form usable for the +formatter's output format. + + +Quickstart +========== + +The most basic formatter shipped with Pygments is the `NullFormatter`. It just +sends the value of a token to the output stream: + +.. sourcecode:: python + + from pygments.formatter import Formatter + + class NullFormatter(Formatter): + def format(self, tokensource, outfile): + for ttype, value in tokensource: + outfile.write(value) + +As you can see, the `format()` method is passed two parameters: `tokensource` +and `outfile`. The first is an iterable of ``(token_type, value)`` tuples, +the latter a file-like object with a `write()` method. + +Because this formatter is so basic, it doesn't override the `get_style_defs()` +method.
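+
+As a quick check, the `NullFormatter` can be driven through `highlight()`;
+since it writes every token value through unchanged, the output should equal
+the input. A minimal sketch:
+
+.. sourcecode:: python
+
+    from pygments import highlight
+    from pygments.lexers import PythonLexer
+    from pygments.formatters import NullFormatter
+
+    code = 'print("Hello World")\n'
+    # NullFormatter writes token values verbatim, so the source round-trips
+    assert highlight(code, PythonLexer(), NullFormatter()) == code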
+ + +Styles +====== + +Styles aren't instantiated but their metaclass provides some class functions +so that you can access the style definitions easily. + +Styles are iterable and yield tuples in the form ``(ttype, d)`` where `ttype` +is a token and `d` is a dict with the following keys: + +``'color'`` + Hexadecimal color value (eg: ``'ff0000'`` for red) or `None` if not + defined. + +``'bold'`` + `True` if the value should be bold + +``'italic'`` + `True` if the value should be italic + +``'underline'`` + `True` if the value should be underlined + +``'bgcolor'`` + Hexadecimal color value for the background (eg: ``'eeeeee'`` for light + gray) or `None` if not defined. + +``'border'`` + Hexadecimal color value for the border (eg: ``'0000aa'`` for a dark + blue) or `None` for no border. + +Additional keys might appear in the future; formatters should ignore all keys +they don't support. + + +HTML 3.2 Formatter +================== + +For a more complex example, let's implement an HTML 3.2 Formatter. We don't +use CSS but inline markup (``<b>``, ``<i>``, etc.). Because this isn't good +style this formatter isn't in the standard library ;-) + +.. sourcecode:: python + + from pygments.formatter import Formatter + + class OldHtmlFormatter(Formatter): + + def __init__(self, **options): + Formatter.__init__(self, **options) + + # create a dict of (start, end) tuples that wrap the + # value of a token so that we can use it in the format + # method later + self.styles = {} + + # we iterate over the `_styles` attribute of a style item + # that contains the parsed style values. + for token, style in self.style: + start = end = '' + # a style item is a tuple in the following form: + # colors are readily specified in hex: 'RRGGBB' + if style['color']: + start += '<font color="#%s">' % style['color'] + end = '</font>' + end + if style['bold']: + start += '<b>' + end = '</b>' + end + if style['italic']: + start += '<i>' + end = '</i>' + end + if style['underline']: + start += '<u>' + end = '</u>' + end + self.styles[token] = (start, end) + + def format(self, tokensource, outfile): + # lastval is a string we use for caching + # because it's possible that a lexer yields a number + # of consecutive tokens with the same token type. + # to minimize the size of the generated html markup we + # try to join the values of same-type tokens here + lastval = '' + lasttype = None + + # wrap the whole output with <pre>
+            outfile.write('<pre>')
+
+            for ttype, value in tokensource:
+                # if the token type doesn't exist in the stylemap
+                # we try it with the parent of the token type
+                # eg: parent of Token.Literal.String.Double is
+                # Token.Literal.String
+                while ttype not in self.styles:
+                    ttype = ttype.parent
+                if ttype == lasttype:
+                    # the current token type is the same as the last
+                    # iteration. cache it
+                    lastval += value
+                else:
+                    # not the same token as last iteration, but we
+                    # have some data in the buffer. wrap it with the
+                    # defined style and write it to the output file
+                    if lastval:
+                        stylebegin, styleend = self.styles[lasttype]
+                        outfile.write(stylebegin + lastval + styleend)
+                    # set lastval/lasttype to current values
+                    lastval = value
+                    lasttype = ttype
+
+            # if something is left in the buffer, write it to the
+            # output file, then close the opened <pre> tag
+            if lastval:
+                stylebegin, styleend = self.styles[lasttype]
+                outfile.write(stylebegin + lastval + styleend)
+            outfile.write('</pre>\n')
+
+The comments should explain it. Again, this formatter doesn't override the +`get_style_defs()` method. If we had used CSS classes instead of +inline HTML markup, we would need to generate the CSS first. For that +purpose the `get_style_defs()` method exists: + + +Generating Style Definitions +============================ + +Some formatters like the `LatexFormatter` and the `HtmlFormatter` don't +output inline markup but reference either macros or css classes. Because +the definitions of those are not part of the output, the `get_style_defs()` +method exists. It is passed one parameter (if it's used and how it's used +is up to the formatter) and has to return a string or ``None``. diff --git a/doc/_build/html/_sources/docs/formatters.rst.txt b/doc/_build/html/_sources/docs/formatters.rst.txt new file mode 100644 index 0000000..9e7074e --- /dev/null +++ b/doc/_build/html/_sources/docs/formatters.rst.txt @@ -0,0 +1,48 @@ +.. -*- mode: rst -*- + +==================== +Available formatters +==================== + +This page lists all builtin formatters. + +Common options +============== + +All formatters support these options: + +`encoding` + If given, must be an encoding name (such as ``"utf-8"``). This will + be used to convert the token strings (which are Unicode strings) + to byte strings in the output (default: ``None``). + It will also be written in an encoding declaration suitable for the + document format if the `full` option is given (e.g. a ``meta + content-type`` directive in HTML or an invocation of the `inputenc` + package in LaTeX). + + If this is ``""`` or ``None``, Unicode strings will be written + to the output file, which most file-like objects do not support. + For example, `pygments.highlight()` will return a Unicode string if + called with no `outfile` argument and a formatter that has `encoding` + set to ``None`` because it uses a `StringIO.StringIO` object that + supports Unicode arguments to `write()`. Using a regular file object + wouldn't work. + + .. versionadded:: 0.6 + +`outencoding` + When using Pygments from the command line, any `encoding` option given is + passed to the lexer and the formatter. This is sometimes not desirable, + for example if you want to set the input encoding to ``"guess"``. + Therefore, `outencoding` has been introduced which overrides `encoding` + for the formatter if given. + + .. versionadded:: 0.7 + + +Formatter classes +================= + +All these classes are importable from :mod:`pygments.formatters`. + +.. pygmentsdoc:: formatters diff --git a/doc/_build/html/_sources/docs/index.rst.txt b/doc/_build/html/_sources/docs/index.rst.txt new file mode 100644 index 0000000..4cf710f --- /dev/null +++ b/doc/_build/html/_sources/docs/index.rst.txt @@ -0,0 +1,61 @@ +Pygments documentation +====================== + +**Starting with Pygments** + +.. toctree:: + :maxdepth: 1 + + ../download + quickstart + cmdline + +**Builtin components** + +.. toctree:: + :maxdepth: 1 + + lexers + filters + formatters + styles + +**Reference** + +.. toctree:: + :maxdepth: 1 + + unicode + tokens + api + +**Hacking for Pygments** + +.. toctree:: + :maxdepth: 1 + + lexerdevelopment + formatterdevelopment + filterdevelopment + plugins + +**Hints and tricks** + +.. toctree:: + :maxdepth: 1 + + rstdirective + moinmoin + java + integrate + +**About Pygments** + +.. toctree:: + :maxdepth: 1 + + changelog + authors + +If you find bugs or have suggestions for the documentation, please submit them +on `GitHub <https://github.com/pygments/pygments>`_.
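+
+To make the ``encoding`` formatter option described under "Common options"
+above concrete, a small sketch (the choice of `TerminalFormatter` here is
+arbitrary):
+
+.. sourcecode:: python
+
+    from pygments import highlight
+    from pygments.lexers import PythonLexer
+    from pygments.formatters import TerminalFormatter
+
+    # encoding=None (the default) makes highlight() return a unicode string ...
+    text = highlight('x = 1\n', PythonLexer(), TerminalFormatter())
+    # ... while a concrete encoding makes the formatter emit encoded bytes
+    data = highlight('x = 1\n', PythonLexer(), TerminalFormatter(encoding='utf-8'))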
diff --git a/doc/_build/html/_sources/docs/integrate.rst.txt b/doc/_build/html/_sources/docs/integrate.rst.txt new file mode 100644 index 0000000..77daaa4 --- /dev/null +++ b/doc/_build/html/_sources/docs/integrate.rst.txt @@ -0,0 +1,40 @@ +.. -*- mode: rst -*- + +=================================== +Using Pygments in various scenarios +=================================== + +Markdown +-------- + +Since Pygments 0.9, the distribution ships Markdown_ preprocessor sample code +that uses Pygments to render source code in +:file:`external/markdown-processor.py`. You can copy and adapt it to your +liking. + +.. _Markdown: http://www.freewisdom.org/projects/python-markdown/ + +TextMate +-------- + +Antonio Cangiano has created a Pygments bundle for TextMate that allows to +colorize code via a simple menu option. It can be found here_. + +.. _here: http://antoniocangiano.com/2008/10/28/pygments-textmate-bundle/ + +Bash completion +--------------- + +The source distribution contains a file ``external/pygments.bashcomp`` that +sets up completion for the ``pygmentize`` command in bash. + +Wrappers for other languages +---------------------------- + +These libraries provide Pygments highlighting for users of other languages +than Python: + +* `pygments.rb `_, a pygments wrapper for Ruby +* `Clygments `_, a pygments wrapper for + Clojure +* `PHPygments `_, a pygments wrapper for PHP diff --git a/doc/_build/html/_sources/docs/java.rst.txt b/doc/_build/html/_sources/docs/java.rst.txt new file mode 100644 index 0000000..f553463 --- /dev/null +++ b/doc/_build/html/_sources/docs/java.rst.txt @@ -0,0 +1,70 @@ +===================== +Use Pygments in Java +===================== + +Thanks to `Jython `_ it is possible to use Pygments in +Java. + +This page is a simple tutorial to get an idea of how this works. You can +then look at the `Jython documentation `_ for more +advanced uses. + +Since version 1.5, Pygments is deployed on `Maven Central +`_ as a JAR, as is Jython +which makes it a lot easier to create a Java project. + +Here is an example of a `Maven `_ ``pom.xml`` file for a +project running Pygments: + +.. sourcecode:: xml + + + + + 4.0.0 + example + example + 1.0-SNAPSHOT + + + org.python + jython-standalone + 2.5.3 + + + org.pygments + pygments + 1.5 + runtime + + + + +The following Java example: + +.. sourcecode:: java + + PythonInterpreter interpreter = new PythonInterpreter(); + + // Set a variable with the content you want to work with + interpreter.set("code", code); + + // Simple use Pygments as you would in Python + interpreter.exec("from pygments import highlight\n" + + "from pygments.lexers import PythonLexer\n" + + "from pygments.formatters import HtmlFormatter\n" + + "\nresult = highlight(code, PythonLexer(), HtmlFormatter())"); + + // Get the result that has been set in a variable + System.out.println(interpreter.get("result", String.class)); + +will print something like: + +.. sourcecode:: html + +
+    <div class="highlight">
+    <pre><span class="k">print</span> <span class="s">&quot;Hello World&quot;</span></pre>
+    </div>
diff --git a/doc/_build/html/_sources/docs/lexerdevelopment.rst.txt b/doc/_build/html/_sources/docs/lexerdevelopment.rst.txt new file mode 100644 index 0000000..5b6813f --- /dev/null +++ b/doc/_build/html/_sources/docs/lexerdevelopment.rst.txt @@ -0,0 +1,728 @@ +.. -*- mode: rst -*- + +.. highlight:: python + +==================== +Write your own lexer +==================== + +If a lexer for your favorite language is missing in the Pygments package, you +can easily write your own and extend Pygments. + +All you need can be found inside the :mod:`pygments.lexer` module. As you can +read in the :doc:`API documentation `, a lexer is a class that is +initialized with some keyword arguments (the lexer options) and that provides a +:meth:`.get_tokens_unprocessed()` method which is given a string or unicode +object with the data to lex. + +The :meth:`.get_tokens_unprocessed()` method must return an iterator or iterable +containing tuples in the form ``(index, token, value)``. Normally you don't +need to do this since there are base lexers that do most of the work and that +you can subclass. + + +RegexLexer +========== + +The lexer base class used by almost all of Pygments' lexers is the +:class:`RegexLexer`. This class allows you to define lexing rules in terms of +*regular expressions* for different *states*. + +States are groups of regular expressions that are matched against the input +string at the *current position*. If one of these expressions matches, a +corresponding action is performed (such as yielding a token with a specific +type, or changing state), the current position is set to where the last match +ended and the matching process continues with the first regex of the current +state. + +Lexer states are kept on a stack: each time a new state is entered, the new +state is pushed onto the stack. The most basic lexers (like the `DiffLexer`) +just need one state. + +Each state is defined as a list of tuples in the form (`regex`, `action`, +`new_state`) where the last item is optional. In the most basic form, `action` +is a token type (like `Name.Builtin`). That means: When `regex` matches, emit a +token with the match text and type `tokentype` and push `new_state` on the state +stack. If the new state is ``'#pop'``, the topmost state is popped from the +stack instead. To pop more than one state, use ``'#pop:2'`` and so on. +``'#push'`` is a synonym for pushing the current state on the stack. + +The following example shows the `DiffLexer` from the builtin lexers. Note that +it contains some additional attributes `name`, `aliases` and `filenames` which +aren't required for a lexer. They are used by the builtin lexer lookup +functions. :: + + from pygments.lexer import RegexLexer + from pygments.token import * + + class DiffLexer(RegexLexer): + name = 'Diff' + aliases = ['diff'] + filenames = ['*.diff'] + + tokens = { + 'root': [ + (r' .*\n', Text), + (r'\+.*\n', Generic.Inserted), + (r'-.*\n', Generic.Deleted), + (r'@.*\n', Generic.Subheading), + (r'Index.*\n', Generic.Heading), + (r'=.*\n', Generic.Heading), + (r'.*\n', Text), + ] + } + +As you can see this lexer only uses one state. When the lexer starts scanning +the text, it first checks if the current character is a space. If this is true +it scans everything until newline and returns the data as a `Text` token (which +is the "no special highlighting" token). + +If this rule doesn't match, it checks if the current char is a plus sign. And +so on. 
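+For a quick sanity check, you can feed a small diff to the lexer and inspect
+the ``(tokentype, value)`` pairs from the high-level :meth:`get_tokens`
+wrapper, which should yield something like:
+
+.. sourcecode:: pycon
+
+    >>> from pygments.lexers import DiffLexer
+    >>> for tokentype, value in DiffLexer().get_tokens('+added\n-removed\n'):
+    ...     print(tokentype, repr(value))
+    Token.Generic.Inserted '+added\n'
+    Token.Generic.Deleted '-removed\n'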
+ +If no rule matches at the current position, the current char is emitted as an +`Error` token that indicates a lexing error, and the position is increased by +one. + + +Adding and testing a new lexer +============================== + +The easiest way to use a new lexer is to use Pygments' support for loading +the lexer from a file relative to your current directory. + +First, change the name of your lexer class to CustomLexer: + +.. code-block:: python + + from pygments.lexer import RegexLexer + from pygments.token import * + + class CustomLexer(RegexLexer): + """All your lexer code goes here!""" + +Then you can load the lexer from the command line with the additional +flag ``-x``: + +.. code-block:: console + + $ pygmentize -l your_lexer_file.py -x + +To specify a class name other than CustomLexer, append it with a colon: + +.. code-block:: console + + $ pygmentize -l your_lexer.py:SomeLexer -x + +Or, using the Python API: + +.. code-block:: python + + # For a lexer named CustomLexer + your_lexer = load_lexer_from_file(filename, **options) + + # For a lexer named MyNewLexer + your_named_lexer = load_lexer_from_file(filename, "MyNewLexer", **options) + +When loading custom lexers and formatters, be extremely careful to use only +trusted files; Pygments will perform the equivalent of ``eval`` on them. + +If you only want to use your lexer with the Pygments API, you can import and +instantiate the lexer yourself, then pass it to :func:`pygments.highlight`. + +To prepare your new lexer for inclusion in the Pygments distribution, so that it +will be found when passing filenames or lexer aliases from the command line, you +have to perform the following steps. + +First, change to the current directory containing the Pygments source code. You +will need to have either an unpacked source tarball, or (preferably) a copy +cloned from GitHub. + +.. code-block:: console + + $ cd .../pygments-main + +Select a matching module under ``pygments/lexers``, or create a new module for +your lexer class. + +Next, make sure the lexer is known from outside of the module. All modules in +the ``pygments.lexers`` package specify ``__all__``. For example, +``esoteric.py`` sets:: + + __all__ = ['BrainfuckLexer', 'BefungeLexer', ...] + +Add the name of your lexer class to this list (or create the list if your lexer +is the only class in the module). + +Finally the lexer can be made publicly known by rebuilding the lexer mapping: + +.. code-block:: console + + $ make mapfiles + +To test the new lexer, store an example file with the proper extension in +``tests/examplefiles``. For example, to test your ``DiffLexer``, add a +``tests/examplefiles/example.diff`` containing a sample diff output. + +Now you can use pygmentize to render your example to HTML: + +.. code-block:: console + + $ ./pygmentize -O full -f html -o /tmp/example.html tests/examplefiles/example.diff + +Note that this explicitly calls the ``pygmentize`` in the current directory +by preceding it with ``./``. This ensures your modifications are used. +Otherwise a possibly already installed, unmodified version without your new +lexer would have been called from the system search path (``$PATH``). + +To view the result, open ``/tmp/example.html`` in your browser. + +Once the example renders as expected, you should run the complete test suite: + +.. code-block:: console + + $ make test + +It also tests that your lexer fulfills the lexer API and certain invariants, +such as that the concatenation of all token text is the same as the input text. 
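+As mentioned above, a custom lexer can also be used directly through the
+Pygments API; a minimal sketch, assuming the ``CustomLexer`` class lives in a
+hypothetical module ``your_module``:
+
+.. sourcecode:: python
+
+    from pygments import highlight
+    from pygments.formatters import HtmlFormatter
+    from your_module import CustomLexer  # hypothetical import path
+
+    with open('tests/examplefiles/example.diff') as f:
+        print(highlight(f.read(), CustomLexer(), HtmlFormatter()))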
+ + +Regex Flags +=========== + +You can either define regex flags locally in the regex (``r'(?x)foo bar'``) or +globally by adding a `flags` attribute to your lexer class. If no attribute is +defined, it defaults to `re.MULTILINE`. For more information about regular +expression flags see the page about `regular expressions`_ in the Python +documentation. + +.. _regular expressions: http://docs.python.org/library/re.html#regular-expression-syntax + + +Scanning multiple tokens at once +================================ + +So far, the `action` element in the rule tuple of regex, action and state has +been a single token type. Now we look at the first of several other possible +values. + +Here is a more complex lexer that highlights INI files. INI files consist of +sections, comments and ``key = value`` pairs:: + + from pygments.lexer import RegexLexer, bygroups + from pygments.token import * + + class IniLexer(RegexLexer): + name = 'INI' + aliases = ['ini', 'cfg'] + filenames = ['*.ini', '*.cfg'] + + tokens = { + 'root': [ + (r'\s+', Text), + (r';.*?$', Comment), + (r'\[.*?\]$', Keyword), + (r'(.*?)(\s*)(=)(\s*)(.*?)$', + bygroups(Name.Attribute, Text, Operator, Text, String)) + ] + } + +The lexer first looks for whitespace, comments and section names. Later it +looks for a line that looks like a key, value pair, separated by an ``'='`` +sign, and optional whitespace. + +The `bygroups` helper yields each capturing group in the regex with a different +token type. First the `Name.Attribute` token, then a `Text` token for the +optional whitespace, after that a `Operator` token for the equals sign. Then a +`Text` token for the whitespace again. The rest of the line is returned as +`String`. + +Note that for this to work, every part of the match must be inside a capturing +group (a ``(...)``), and there must not be any nested capturing groups. If you +nevertheless need a group, use a non-capturing group defined using this syntax: +``(?:some|words|here)`` (note the ``?:`` after the beginning parenthesis). + +If you find yourself needing a capturing group inside the regex which shouldn't +be part of the output but is used in the regular expressions for backreferencing +(eg: ``r'(<(foo|bar)>)(.*?)()'``), you can pass `None` to the bygroups +function and that group will be skipped in the output. + + +Changing states +=============== + +Many lexers need multiple states to work as expected. For example, some +languages allow multiline comments to be nested. Since this is a recursive +pattern it's impossible to lex just using regular expressions. + +Here is a lexer that recognizes C++ style comments (multi-line with ``/* */`` +and single-line with ``//`` until end of line):: + + from pygments.lexer import RegexLexer + from pygments.token import * + + class CppCommentLexer(RegexLexer): + name = 'Example Lexer with states' + + tokens = { + 'root': [ + (r'[^/]+', Text), + (r'/\*', Comment.Multiline, 'comment'), + (r'//.*?$', Comment.Singleline), + (r'/', Text) + ], + 'comment': [ + (r'[^*/]', Comment.Multiline), + (r'/\*', Comment.Multiline, '#push'), + (r'\*/', Comment.Multiline, '#pop'), + (r'[*/]', Comment.Multiline) + ] + } + +This lexer starts lexing in the ``'root'`` state. It tries to match as much as +possible until it finds a slash (``'/'``). If the next character after the slash +is an asterisk (``'*'``) the `RegexLexer` sends those two characters to the +output stream marked as `Comment.Multiline` and continues lexing with the rules +defined in the ``'comment'`` state. 
+ +If there wasn't an asterisk after the slash, the `RegexLexer` checks if it's a +Singleline comment (i.e. followed by a second slash). If this also wasn't the +case it must be a single slash, which is not a comment starter (the separate +regex for a single slash must also be given, else the slash would be marked as +an error token). + +Inside the ``'comment'`` state, we do the same thing again. Scan until the +lexer finds a star or slash. If it's the opening of a multiline comment, push +the ``'comment'`` state on the stack and continue scanning, again in the +``'comment'`` state. Else, check if it's the end of the multiline comment. If +yes, pop one state from the stack. + +Note: If you pop from an empty stack you'll get an `IndexError`. (There is an +easy way to prevent this from happening: don't ``'#pop'`` in the root state). + +If the `RegexLexer` encounters a newline that is flagged as an error token, the +stack is emptied and the lexer continues scanning in the ``'root'`` state. This +can help producing error-tolerant highlighting for erroneous input, e.g. when a +single-line string is not closed. + + +Advanced state tricks +===================== + +There are a few more things you can do with states: + +- You can push multiple states onto the stack if you give a tuple instead of a + simple string as the third item in a rule tuple. For example, if you want to + match a comment containing a directive, something like: + + .. code-block:: text + + /* rest of comment */ + + you can use this rule:: + + tokens = { + 'root': [ + (r'/\* <', Comment, ('comment', 'directive')), + ... + ], + 'directive': [ + (r'[^>]*', Comment.Directive), + (r'>', Comment, '#pop'), + ], + 'comment': [ + (r'[^*]+', Comment), + (r'\*/', Comment, '#pop'), + (r'\*', Comment), + ] + } + + When this encounters the above sample, first ``'comment'`` and ``'directive'`` + are pushed onto the stack, then the lexer continues in the directive state + until it finds the closing ``>``, then it continues in the comment state until + the closing ``*/``. Then, both states are popped from the stack again and + lexing continues in the root state. + + .. versionadded:: 0.9 + The tuple can contain the special ``'#push'`` and ``'#pop'`` (but not + ``'#pop:n'``) directives. + + +- You can include the rules of a state in the definition of another. This is + done by using `include` from `pygments.lexer`:: + + from pygments.lexer import RegexLexer, bygroups, include + from pygments.token import * + + class ExampleLexer(RegexLexer): + tokens = { + 'comments': [ + (r'/\*.*?\*/', Comment), + (r'//.*?\n', Comment), + ], + 'root': [ + include('comments'), + (r'(function )(\w+)( {)', + bygroups(Keyword, Name, Keyword), 'function'), + (r'.', Text), + ], + 'function': [ + (r'[^}/]+', Text), + include('comments'), + (r'/', Text), + (r'\}', Keyword, '#pop'), + ] + } + + This is a hypothetical lexer for a language that consist of functions and + comments. Because comments can occur at toplevel and in functions, we need + rules for comments in both states. As you can see, the `include` helper saves + repeating rules that occur more than once (in this example, the state + ``'comment'`` will never be entered by the lexer, as it's only there to be + included in ``'root'`` and ``'function'``). + +- Sometimes, you may want to "combine" a state from existing ones. This is + possible with the `combined` helper from `pygments.lexer`. 
+ + If you, instead of a new state, write ``combined('state1', 'state2')`` as the + third item of a rule tuple, a new anonymous state will be formed from state1 + and state2 and if the rule matches, the lexer will enter this state. + + This is not used very often, but can be helpful in some cases, such as the + `PythonLexer`'s string literal processing. + +- If you want your lexer to start lexing in a different state you can modify the + stack by overriding the `get_tokens_unprocessed()` method:: + + from pygments.lexer import RegexLexer + + class ExampleLexer(RegexLexer): + tokens = {...} + + def get_tokens_unprocessed(self, text, stack=('root', 'otherstate')): + for item in RegexLexer.get_tokens_unprocessed(self, text, stack): + yield item + + Some lexers like the `PhpLexer` use this to make the leading ``', Name.Tag), + ], + 'script-content': [ + (r'(.+?)(<\s*/\s*script\s*>)', + bygroups(using(JavascriptLexer), Name.Tag), + '#pop'), + ] + } + +Here the content of a ```` end tag is processed by the `JavascriptLexer`, +while the end tag is yielded as a normal token with the `Name.Tag` type. + +Also note the ``(r'<\s*script\s*', Name.Tag, ('script-content', 'tag'))`` rule. +Here, two states are pushed onto the state stack, ``'script-content'`` and +``'tag'``. That means that first ``'tag'`` is processed, which will lex +attributes and the closing ``>``, then the ``'tag'`` state is popped and the +next state on top of the stack will be ``'script-content'``. + +Since you cannot refer to the class currently being defined, use `this` +(imported from `pygments.lexer`) to refer to the current lexer class, i.e. +``using(this)``. This construct may seem unnecessary, but this is often the +most obvious way of lexing arbitrary syntax between fixed delimiters without +introducing deeply nested states. + +The `using()` helper has a special keyword argument, `state`, which works as +follows: if given, the lexer to use initially is not in the ``"root"`` state, +but in the state given by this argument. This does not work with advanced +`RegexLexer` subclasses such as `ExtendedRegexLexer` (see below). + +Any other keywords arguments passed to `using()` are added to the keyword +arguments used to create the lexer. + + +Delegating Lexer +================ + +Another approach for nested lexers is the `DelegatingLexer` which is for example +used for the template engine lexers. It takes two lexers as arguments on +initialisation: a `root_lexer` and a `language_lexer`. + +The input is processed as follows: First, the whole text is lexed with the +`language_lexer`. All tokens yielded with the special type of ``Other`` are +then concatenated and given to the `root_lexer`. The language tokens of the +`language_lexer` are then inserted into the `root_lexer`'s token stream at the +appropriate positions. :: + + from pygments.lexer import DelegatingLexer + from pygments.lexers.web import HtmlLexer, PhpLexer + + class HtmlPhpLexer(DelegatingLexer): + def __init__(self, **options): + super(HtmlPhpLexer, self).__init__(HtmlLexer, PhpLexer, **options) + +This procedure ensures that e.g. HTML with template tags in it is highlighted +correctly even if the template tags are put into HTML tags or attributes. 
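+Such a lexer is used like any other; a minimal sketch, assuming the
+``HtmlPhpLexer`` defined above:
+
+.. sourcecode:: python
+
+    from pygments import highlight
+    from pygments.formatters import HtmlFormatter
+
+    code = '<p><?php echo $greeting; ?></p>'
+    # the PHP part is lexed by the PhpLexer, the surrounding markup
+    # by the HtmlLexer
+    print(highlight(code, HtmlPhpLexer(), HtmlFormatter()))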
+ +If you want to change the needle token ``Other`` to something else, you can give +the lexer another token type as the third parameter:: + + DelegatingLexer.__init__(MyLexer, OtherLexer, Text, **options) + + +Callbacks +========= + +Sometimes the grammar of a language is so complex that a lexer would be unable +to process it just by using regular expressions and stacks. + +For this, the `RegexLexer` allows callbacks to be given in rule tuples, instead +of token types (`bygroups` and `using` are nothing else but preimplemented +callbacks). The callback must be a function taking two arguments: + +* the lexer itself +* the match object for the last matched rule + +The callback must then return an iterable of (or simply yield) ``(index, +tokentype, value)`` tuples, which are then just passed through by +`get_tokens_unprocessed()`. The ``index`` here is the position of the token in +the input string, ``tokentype`` is the normal token type (like `Name.Builtin`), +and ``value`` the associated part of the input string. + +You can see an example here:: + + from pygments.lexer import RegexLexer + from pygments.token import Generic + + class HypotheticLexer(RegexLexer): + + def headline_callback(lexer, match): + equal_signs = match.group(1) + text = match.group(2) + yield match.start(), Generic.Headline, equal_signs + text + equal_signs + + tokens = { + 'root': [ + (r'(=+)(.*?)(\1)', headline_callback) + ] + } + +If the regex for the `headline_callback` matches, the function is called with +the match object. Note that after the callback is done, processing continues +normally, that is, after the end of the previous match. The callback has no +possibility to influence the position. + +There are not really any simple examples for lexer callbacks, but you can see +them in action e.g. in the `SMLLexer` class in `ml.py`_. + +.. _ml.py: https://github.com/pygments/pygments/blob/master/pygments/lexers/ml.py + + +The ExtendedRegexLexer class +============================ + +The `RegexLexer`, even with callbacks, unfortunately isn't powerful enough for +the funky syntax rules of languages such as Ruby. + +But fear not; even then you don't have to abandon the regular expression +approach: Pygments has a subclass of `RegexLexer`, the `ExtendedRegexLexer`. +All features known from RegexLexers are available here too, and the tokens are +specified in exactly the same way, *except* for one detail: + +The `get_tokens_unprocessed()` method holds its internal state data not as local +variables, but in an instance of the `pygments.lexer.LexerContext` class, and +that instance is passed to callbacks as a third argument. This means that you +can modify the lexer state in callbacks. + +The `LexerContext` class has the following members: + +* `text` -- the input text +* `pos` -- the current starting position that is used for matching regexes +* `stack` -- a list containing the state stack +* `end` -- the maximum position to which regexes are matched, this defaults to + the length of `text` + +Additionally, the `get_tokens_unprocessed()` method can be given a +`LexerContext` instead of a string and will then process this context instead of +creating a new one for the string argument. + +Note that because you can set the current position to anything in the callback, +it won't be automatically be set by the caller after the callback is finished. 
+For example, this is how the hypothetical lexer above would be written with the +`ExtendedRegexLexer`:: + + from pygments.lexer import ExtendedRegexLexer + from pygments.token import Generic + + class ExHypotheticLexer(ExtendedRegexLexer): + + def headline_callback(lexer, match, ctx): + equal_signs = match.group(1) + text = match.group(2) + yield match.start(), Generic.Headline, equal_signs + text + equal_signs + ctx.pos = match.end() + + tokens = { + 'root': [ + (r'(=+)(.*?)(\1)', headline_callback) + ] + } + +This might sound confusing (and it can really be). But it is needed, and for an +example look at the Ruby lexer in `ruby.py`_. + +.. _ruby.py: https://github.com/pygments/pygments/blob/master/pygments/lexers/ruby.py + + +Handling Lists of Keywords +========================== + +For a relatively short list (hundreds) you can construct an optimized regular +expression directly using ``words()`` (longer lists, see next section). This +function handles a few things for you automatically, including escaping +metacharacters and Python's first-match rather than longest-match in +alternations. Feel free to put the lists themselves in +``pygments/lexers/_$lang_builtins.py`` (see examples there), and generated by +code if possible. + +An example of using ``words()`` is something like:: + + from pygments.lexer import RegexLexer, words, Name + + class MyLexer(RegexLexer): + + tokens = { + 'root': [ + (words(('else', 'elseif'), suffix=r'\b'), Name.Builtin), + (r'\w+', Name), + ], + } + +As you can see, you can add ``prefix`` and ``suffix`` parts to the constructed +regex. + + +Modifying Token Streams +======================= + +Some languages ship a lot of builtin functions (for example PHP). The total +amount of those functions differs from system to system because not everybody +has every extension installed. In the case of PHP there are over 3000 builtin +functions. That's an incredibly huge amount of functions, much more than you +want to put into a regular expression. + +But because only `Name` tokens can be function names this is solvable by +overriding the ``get_tokens_unprocessed()`` method. The following lexer +subclasses the `PythonLexer` so that it highlights some additional names as +pseudo keywords:: + + from pygments.lexers.python import PythonLexer + from pygments.token import Name, Keyword + + class MyPythonLexer(PythonLexer): + EXTRA_KEYWORDS = set(('foo', 'bar', 'foobar', 'barfoo', 'spam', 'eggs')) + + def get_tokens_unprocessed(self, text): + for index, token, value in PythonLexer.get_tokens_unprocessed(self, text): + if token is Name and value in self.EXTRA_KEYWORDS: + yield index, Keyword.Pseudo, value + else: + yield index, token, value + +The `PhpLexer` and `LuaLexer` use this method to resolve builtin functions. diff --git a/doc/_build/html/_sources/docs/lexers.rst.txt b/doc/_build/html/_sources/docs/lexers.rst.txt new file mode 100644 index 0000000..ef40f14 --- /dev/null +++ b/doc/_build/html/_sources/docs/lexers.rst.txt @@ -0,0 +1,69 @@ +.. -*- mode: rst -*- + +================ +Available lexers +================ + +This page lists all available builtin lexers and the options they take. + +Currently, **all lexers** support these options: + +`stripnl` + Strip leading and trailing newlines from the input (default: ``True``) + +`stripall` + Strip all leading and trailing whitespace from the input (default: + ``False``). + +`ensurenl` + Make sure that the input ends with a newline (default: ``True``). This + is required for some lexers that consume input linewise. + + .. 
versionadded:: 1.3 + +`tabsize` + If given and greater than 0, expand tabs in the input (default: ``0``). + +`encoding` + If given, must be an encoding name (such as ``"utf-8"``). This encoding + will be used to convert the input string to Unicode (if it is not already + a Unicode string). The default is ``"guess"``. + + If this option is set to ``"guess"``, a simple UTF-8 vs. Latin-1 + detection is used, if it is set to ``"chardet"``, the + `chardet library `_ is used to + guess the encoding of the input. + + .. versionadded:: 0.6 + + +The "Short Names" field lists the identifiers that can be used with the +`get_lexer_by_name()` function. + +These lexers are builtin and can be imported from `pygments.lexers`: + +.. pygmentsdoc:: lexers + + +Iterating over all lexers +------------------------- + +.. versionadded:: 0.6 + +To get all lexers (both the builtin and the plugin ones), you can +use the `get_all_lexers()` function from the `pygments.lexers` +module: + +.. sourcecode:: pycon + + >>> from pygments.lexers import get_all_lexers + >>> i = get_all_lexers() + >>> i.next() + ('Diff', ('diff',), ('*.diff', '*.patch'), ('text/x-diff', 'text/x-patch')) + >>> i.next() + ('Delphi', ('delphi', 'objectpascal', 'pas', 'pascal'), ('*.pas',), ('text/x-pascal',)) + >>> i.next() + ('XML+Ruby', ('xml+erb', 'xml+ruby'), (), ()) + +As you can see, the return value is an iterator which yields tuples +in the form ``(name, aliases, filetypes, mimetypes)``. diff --git a/doc/_build/html/_sources/docs/moinmoin.rst.txt b/doc/_build/html/_sources/docs/moinmoin.rst.txt new file mode 100644 index 0000000..8b2216b --- /dev/null +++ b/doc/_build/html/_sources/docs/moinmoin.rst.txt @@ -0,0 +1,39 @@ +.. -*- mode: rst -*- + +============================ +Using Pygments with MoinMoin +============================ + +From Pygments 0.7, the source distribution ships a `Moin`_ parser plugin that +can be used to get Pygments highlighting in Moin wiki pages. + +To use it, copy the file `external/moin-parser.py` from the Pygments +distribution to the `data/plugin/parser` subdirectory of your Moin instance. +Edit the options at the top of the file (currently ``ATTACHMENTS`` and +``INLINESTYLES``) and rename the file to the name that the parser directive +should have. For example, if you name the file ``code.py``, you can get a +highlighted Python code sample with this Wiki markup:: + + {{{ + #!code python + [...] + }}} + +where ``python`` is the Pygments name of the lexer to use. + +Additionally, if you set the ``ATTACHMENTS`` option to True, Pygments will also +be called for all attachments for whose filenames there is no other parser +registered. + +You are responsible for including CSS rules that will map the Pygments CSS +classes to colors. You can output a stylesheet file with `pygmentize`, put it +into the `htdocs` directory of your Moin instance and then include it in the +`stylesheets` configuration option in the Moin config, e.g.:: + + stylesheets = [('screen', '/htdocs/pygments.css')] + +If you do not want to do that and are willing to accept larger HTML output, you +can set the ``INLINESTYLES`` option to True. + + +.. 
_Moin: http://moinmoin.wikiwikiweb.de/ diff --git a/doc/_build/html/_sources/docs/plugins.rst.txt b/doc/_build/html/_sources/docs/plugins.rst.txt new file mode 100644 index 0000000..a6f8d7b --- /dev/null +++ b/doc/_build/html/_sources/docs/plugins.rst.txt @@ -0,0 +1,93 @@ +================ +Register Plugins +================ + +If you want to extend Pygments without hacking the sources, but want to +use the lexer/formatter/style/filter lookup functions (`lexers.get_lexer_by_name` +et al.), you can use `setuptools`_ entrypoints to add new lexers, formatters +or styles as if they were in the Pygments core. + +.. _setuptools: http://peak.telecommunity.com/DevCenter/setuptools + +That means you can use your highlighter modules with the `pygmentize` script, +which relies on the mentioned functions. + + +Entrypoints +=========== + +Here is a list of setuptools entrypoints that Pygments understands: + +`pygments.lexers` + + This entrypoint is used for adding new lexers to the Pygments core. + The name of the entrypoint values doesn't really matter, Pygments extracts + required metadata from the class definition: + + .. sourcecode:: ini + + [pygments.lexers] + yourlexer = yourmodule:YourLexer + + Note that you have to define ``name``, ``aliases`` and ``filename`` + attributes so that you can use the highlighter from the command line: + + .. sourcecode:: python + + class YourLexer(...): + name = 'Name Of Your Lexer' + aliases = ['alias'] + filenames = ['*.ext'] + + +`pygments.formatters` + + You can use this entrypoint to add new formatters to Pygments. The + name of an entrypoint item is the name of the formatter. If you + prefix the name with a slash it's used as a filename pattern: + + .. sourcecode:: ini + + [pygments.formatters] + yourformatter = yourmodule:YourFormatter + /.ext = yourmodule:YourFormatter + + +`pygments.styles` + + To add a new style you can use this entrypoint. The name of the entrypoint + is the name of the style: + + .. sourcecode:: ini + + [pygments.styles] + yourstyle = yourmodule:YourStyle + + +`pygments.filters` + + Use this entrypoint to register a new filter. The name of the + entrypoint is the name of the filter: + + .. sourcecode:: ini + + [pygments.filters] + yourfilter = yourmodule:YourFilter + + +How To Use Entrypoints +====================== + +This documentation doesn't explain how to use those entrypoints because this is +covered in the `setuptools documentation`_. That page should cover everything +you need to write a plugin. + +.. _setuptools documentation: http://peak.telecommunity.com/DevCenter/setuptools + + +Extending The Core +================== + +If you have written a Pygments plugin that is open source, please inform us +about that. There is a high chance that we'll add it to the Pygments +distribution. diff --git a/doc/_build/html/_sources/docs/quickstart.rst.txt b/doc/_build/html/_sources/docs/quickstart.rst.txt new file mode 100644 index 0000000..3a823e7 --- /dev/null +++ b/doc/_build/html/_sources/docs/quickstart.rst.txt @@ -0,0 +1,205 @@ +.. -*- mode: rst -*- + +=========================== +Introduction and Quickstart +=========================== + + +Welcome to Pygments! This document explains the basic concepts and terms and +gives a few examples of how to use the library. 
+ + +Architecture +============ + +There are four types of components that work together highlighting a piece of +code: + +* A **lexer** splits the source into tokens, fragments of the source that + have a token type that determines what the text represents semantically + (e.g., keyword, string, or comment). There is a lexer for every language + or markup format that Pygments supports. +* The token stream can be piped through **filters**, which usually modify + the token types or text fragments, e.g. uppercasing all keywords. +* A **formatter** then takes the token stream and writes it to an output + file, in a format such as HTML, LaTeX or RTF. +* While writing the output, a **style** determines how to highlight all the + different token types. It maps them to attributes like "red and bold". + + +Example +======= + +Here is a small example for highlighting Python code: + +.. sourcecode:: python + + from pygments import highlight + from pygments.lexers import PythonLexer + from pygments.formatters import HtmlFormatter + + code = 'print "Hello World"' + print(highlight(code, PythonLexer(), HtmlFormatter())) + +which prints something like this: + +.. sourcecode:: html + +
+    <div class="highlight">
+    <pre><span class="k">print</span> <span class="s">&quot;Hello World&quot;</span></pre>
+    </div>
+ +As you can see, Pygments uses CSS classes (by default, but you can change that) +instead of inline styles in order to avoid outputting redundant style information over +and over. A CSS stylesheet that contains all CSS classes possibly used in the output +can be produced by: + +.. sourcecode:: python + + print(HtmlFormatter().get_style_defs('.highlight')) + +The argument to :func:`get_style_defs` is used as an additional CSS selector: +the output may look like this: + +.. sourcecode:: css + + .highlight .k { color: #AA22FF; font-weight: bold } + .highlight .s { color: #BB4444 } + ... + + +Options +======= + +The :func:`highlight()` function supports a fourth argument called *outfile*, it +must be a file object if given. The formatted output will then be written to +this file instead of being returned as a string. + +Lexers and formatters both support options. They are given to them as keyword +arguments either to the class or to the lookup method: + +.. sourcecode:: python + + from pygments import highlight + from pygments.lexers import get_lexer_by_name + from pygments.formatters import HtmlFormatter + + lexer = get_lexer_by_name("python", stripall=True) + formatter = HtmlFormatter(linenos=True, cssclass="source") + result = highlight(code, lexer, formatter) + +This makes the lexer strip all leading and trailing whitespace from the input +(`stripall` option), lets the formatter output line numbers (`linenos` option), +and sets the wrapping ``
+<div>``'s class to ``source`` (instead of
+``highlight``).
+
+Important options include:
+
+`encoding` : for lexers and formatters
+    Since Pygments uses Unicode strings internally, this determines which
+    encoding will be used to convert to or from byte strings.
+`style` : for formatters
+    The name of the style to use when writing the output.
+
+
+For an overview of builtin lexers and formatters and their options, visit the
+:doc:`lexer <lexers>` and :doc:`formatters <formatters>` lists.
+
+For documentation on filters, see :doc:`this page <filters>`.
+
+
+Lexer and formatter lookup
+==========================
+
+If you want to look up a built-in lexer by its alias or a filename, you can
+use one of the following methods:
+
+.. sourcecode:: pycon
+
+    >>> from pygments.lexers import (get_lexer_by_name,
+    ...     get_lexer_for_filename, get_lexer_for_mimetype)
+
+    >>> get_lexer_by_name('python')
+    <pygments.lexers.PythonLexer>
+
+    >>> get_lexer_for_filename('spam.rb')
+    <pygments.lexers.RubyLexer>
+
+    >>> get_lexer_for_mimetype('text/x-perl')
+    <pygments.lexers.PerlLexer>
+
+All these functions accept keyword arguments; they will be passed to the lexer
+as options.
+
+A similar API is available for formatters: use :func:`.get_formatter_by_name()`
+and :func:`.get_formatter_for_filename()` from the :mod:`pygments.formatters`
+module for this purpose.
+
+
+Guessing lexers
+===============
+
+If you don't know the content of the file, or you want to highlight a file
+whose extension is ambiguous, such as ``.html`` (which could contain plain HTML
+or some template tags), use these functions:
+
+.. sourcecode:: pycon
+
+    >>> from pygments.lexers import guess_lexer, guess_lexer_for_filename
+
+    >>> guess_lexer('#!/usr/bin/python\nprint "Hello World!"')
+    <pygments.lexers.PythonLexer>
+
+    >>> guess_lexer_for_filename('test.py', 'print "Hello World!"')
+    <pygments.lexers.PythonLexer>
+
+:func:`.guess_lexer()` passes the given content to the lexer classes'
+:meth:`analyse_text()` method and returns the one for which it returns the
+highest number.
+
+All lexers have two different filename pattern lists: the primary and the
+secondary one. The :func:`.get_lexer_for_filename()` function only uses the
+primary list, whose entries are supposed to be unique among all lexers.
+:func:`.guess_lexer_for_filename()`, however, will first loop through all lexers
+and look at the primary and secondary filename patterns if the filename matches.
+If only one lexer matches, it is returned, else the guessing mechanism of
+:func:`.guess_lexer()` is used with the matching lexers.
+
+As usual, keyword arguments to these functions are given to the created lexer
+as options.
+
+
+Command line usage
+==================
+
+You can use Pygments from the command line, using the :program:`pygmentize`
+script::
+
+    $ pygmentize test.py
+
+will highlight the Python file test.py using ANSI escape sequences
+(a.k.a. terminal colors) and print the result to standard output.
+
+To output HTML, use the ``-f`` option::
+
+    $ pygmentize -f html -o test.html test.py
+
+to write an HTML-highlighted version of test.py to the file test.html.
+Note that it will only be a snippet of HTML; if you want a full HTML document,
+use the "full" option::
+
+    $ pygmentize -f html -O full -o test.html test.py
+
+This will produce a full HTML document with included stylesheet.
+
+A style can be selected with ``-O style=<name>``.
+
+If you need a stylesheet for an existing HTML file using Pygments CSS classes,
+it can be created with::
+
+    $ pygmentize -S default -f html > style.css
+
+where ``default`` is the style name.
+
+More options and tricks can be found in the :doc:`command line reference
+<cmdline>`.
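+For completeness, the ``-O full`` invocation above has a direct API
+equivalent; a minimal sketch using the functions shown earlier:
+
+.. sourcecode:: python
+
+    from pygments import highlight
+    from pygments.lexers import PythonLexer
+    from pygments.formatters import HtmlFormatter
+
+    with open('test.py') as infile, open('test.html', 'w') as outfile:
+        # full=True wraps the snippet in a complete HTML document,
+        # including a stylesheet for the chosen style
+        highlight(infile.read(), PythonLexer(),
+                  HtmlFormatter(full=True, style='default'), outfile)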
diff --git a/doc/_build/html/_sources/docs/rstdirective.rst.txt b/doc/_build/html/_sources/docs/rstdirective.rst.txt new file mode 100644 index 0000000..c0d503b --- /dev/null +++ b/doc/_build/html/_sources/docs/rstdirective.rst.txt @@ -0,0 +1,22 @@ +.. -*- mode: rst -*- + +================================ +Using Pygments in ReST documents +================================ + +Many Python people use `ReST`_ for documentation their sourcecode, programs, +scripts et cetera. This also means that documentation often includes sourcecode +samples or snippets. + +You can easily enable Pygments support for your ReST texts using a custom +directive -- this is also how this documentation displays source code. + +From Pygments 0.9, the directive is shipped in the distribution as +`external/rst-directive.py`. You can copy and adapt this code to your liking. + +.. removed -- too confusing + *Loosely related note:* The ReST lexer now recognizes ``.. sourcecode::`` and + ``.. code::`` directives and highlights the contents in the specified language + if the `handlecodeblocks` option is true. + +.. _ReST: http://docutils.sf.net/rst.html diff --git a/doc/_build/html/_sources/docs/styles.rst.txt b/doc/_build/html/_sources/docs/styles.rst.txt new file mode 100644 index 0000000..570293a --- /dev/null +++ b/doc/_build/html/_sources/docs/styles.rst.txt @@ -0,0 +1,232 @@ +.. -*- mode: rst -*- + +====== +Styles +====== + +Pygments comes with some builtin styles that work for both the HTML and +LaTeX formatter. + +The builtin styles can be looked up with the `get_style_by_name` function: + +.. sourcecode:: pycon + + >>> from pygments.styles import get_style_by_name + >>> get_style_by_name('colorful') + + +You can pass a instance of a `Style` class to a formatter as the `style` +option in form of a string: + +.. sourcecode:: pycon + + >>> from pygments.styles import get_style_by_name + >>> from pygments.formatters import HtmlFormatter + >>> HtmlFormatter(style='colorful').style + + +Or you can also import your own style (which must be a subclass of +`pygments.style.Style`) and pass it to the formatter: + +.. sourcecode:: pycon + + >>> from yourapp.yourmodule import YourStyle + >>> from pygments.formatters import HtmlFormatter + >>> HtmlFormatter(style=YourStyle).style + + + +Creating Own Styles +=================== + +So, how to create a style? All you have to do is to subclass `Style` and +define some styles: + +.. sourcecode:: python + + from pygments.style import Style + from pygments.token import Keyword, Name, Comment, String, Error, \ + Number, Operator, Generic + + class YourStyle(Style): + default_style = "" + styles = { + Comment: 'italic #888', + Keyword: 'bold #005', + Name: '#f00', + Name.Function: '#0f0', + Name.Class: 'bold #0f0', + String: 'bg:#eee #111' + } + +That's it. There are just a few rules. When you define a style for `Name` +the style automatically also affects `Name.Function` and so on. If you +defined ``'bold'`` and you don't want boldface for a subtoken use ``'nobold'``. + +(Philosophy: the styles aren't written in CSS syntax since this way +they can be used for a variety of formatters.) + +`default_style` is the style inherited by all token types. + +To make the style usable for Pygments, you must + +* either register it as a plugin (see :doc:`the plugin docs `) +* or drop it into the `styles` subpackage of your Pygments distribution one style + class per style, where the file name is the style name and the class name is + `StylenameClass`. 
For example, if your style should be called + ``"mondrian"``, name the class `MondrianStyle`, put it into the file + ``mondrian.py`` and this file into the ``pygments.styles`` subpackage + directory. + + +Style Rules +=========== + +Here a small overview of all allowed styles: + +``bold`` + render text as bold +``nobold`` + don't render text as bold (to prevent subtokens being highlighted bold) +``italic`` + render text italic +``noitalic`` + don't render text as italic +``underline`` + render text underlined +``nounderline`` + don't render text underlined +``bg:`` + transparent background +``bg:#000000`` + background color (black) +``border:`` + no border +``border:#ffffff`` + border color (white) +``#ff0000`` + text color (red) +``noinherit`` + don't inherit styles from supertoken + +Note that there may not be a space between ``bg:`` and the color value +since the style definition string is split at whitespace. +Also, using named colors is not allowed since the supported color names +vary for different formatters. + +Furthermore, not all lexers might support every style. + + +Builtin Styles +============== + +Pygments ships some builtin styles which are maintained by the Pygments team. + +To get a list of known styles you can use this snippet: + +.. sourcecode:: pycon + + >>> from pygments.styles import STYLE_MAP + >>> STYLE_MAP.keys() + ['default', 'emacs', 'friendly', 'colorful'] + + +Getting a list of available styles +================================== + +.. versionadded:: 0.6 + +Because it could be that a plugin registered a style, there is +a way to iterate over all styles: + +.. sourcecode:: pycon + + >>> from pygments.styles import get_all_styles + >>> styles = list(get_all_styles()) + + +.. _AnsiTerminalStyle: + +Terminal Styles +=============== + +.. versionadded:: 2.2 + +Custom styles used with the 256-color terminal formatter can also map colors to +use the 8 default ANSI colors. To do so, use ``ansigreen``, ``ansibrightred`` or +any other colors defined in :attr:`pygments.style.ansicolors`. Foreground ANSI +colors will be mapped to the corresponding `escape codes 30 to 37 +`_ thus respecting any +custom color mapping and themes provided by many terminal emulators. Light +variants are treated as foreground color with and an added bold flag. +``bg:ansi`` will also be respected, except the light variant will be the +same shade as their dark variant. + +See the following example where the color of the string ``"hello world"`` is +governed by the escape sequence ``\x1b[34;01m`` (Ansi bright blue, Bold, 41 being red +background) instead of an extended foreground & background color. + +.. sourcecode:: pycon + + >>> from pygments import highlight + >>> from pygments.style import Style + >>> from pygments.token import Token + >>> from pygments.lexers import Python3Lexer + >>> from pygments.formatters import Terminal256Formatter + + >>> class MyStyle(Style): + styles = { + Token.String: 'ansibrightblue bg:ansibrightred', + } + + >>> code = 'print("Hello World")' + >>> result = highlight(code, Python3Lexer(), Terminal256Formatter(style=MyStyle)) + >>> print(result.encode()) + b'\x1b[34;41;01m"\x1b[39;49;00m\x1b[34;41;01mHello World\x1b[39;49;00m\x1b[34;41;01m"\x1b[39;49;00m' + +Colors specified using ``ansi*`` are converted to a default set of RGB colors +when used with formatters other than the terminal-256 formatter. 
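+You can observe this fallback by rendering the same style with a
+non-terminal formatter; a sketch reusing the ``MyStyle`` class defined
+above (the emitted CSS rules contain the substituted RGB values):
+
+.. sourcecode:: pycon
+
+    >>> from pygments.formatters import HtmlFormatter
+    >>> print(HtmlFormatter(style=MyStyle).get_style_defs('.highlight'))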
+ +By definition of ANSI, the following colors are considered "light" colors, and +will be rendered by most terminals as bold: + +- "brightblack" (darkgrey), "brightred", "brightgreen", "brightyellow", "brightblue", + "brightmagenta", "brightcyan", "white" + +The following are considered "dark" colors and will be rendered as non-bold: + +- "black", "red", "green", "yellow", "blue", "magenta", "cyan", + "gray" + +Exact behavior might depends on the terminal emulator you are using, and its +settings. + +.. _new-ansi-color-names: + +.. versionchanged:: 2.4 + +The definition of the ANSI color names has changed. +New names are easier to understand and align to the colors used in other projects. + +===================== ==================== +New names Pygments up to 2.3 +===================== ==================== +``ansiblack`` ``#ansiblack`` +``ansired`` ``#ansidarkred`` +``ansigreen`` ``#ansidarkgreen`` +``ansiyellow`` ``#ansibrown`` +``ansiblue`` ``#ansidarkblue`` +``ansimagenta`` ``#ansipurple`` +``ansicyan`` ``#ansiteal`` +``ansigray`` ``#ansilightgray`` +``ansibrightblack`` ``#ansidarkgray`` +``ansibrightred`` ``#ansired`` +``ansibrightgreen`` ``#ansigreen`` +``ansibrightyellow`` ``#ansiyellow`` +``ansibrightblue`` ``#ansiblue`` +``ansibrightmagenta`` ``#ansifuchsia`` +``ansibrightcyan`` ``#ansiturquoise`` +``ansiwhite`` ``#ansiwhite`` +===================== ==================== + +Old ANSI color names are deprecated but will still work. diff --git a/doc/_build/html/_sources/docs/tokens.rst.txt b/doc/_build/html/_sources/docs/tokens.rst.txt new file mode 100644 index 0000000..801fc63 --- /dev/null +++ b/doc/_build/html/_sources/docs/tokens.rst.txt @@ -0,0 +1,372 @@ +.. -*- mode: rst -*- + +============== +Builtin Tokens +============== + +.. module:: pygments.token + +In the :mod:`pygments.token` module, there is a special object called `Token` +that is used to create token types. + +You can create a new token type by accessing an attribute of `Token`: + +.. sourcecode:: pycon + + >>> from pygments.token import Token + >>> Token.String + Token.String + >>> Token.String is Token.String + True + +Note that tokens are singletons so you can use the ``is`` operator for comparing +token types. + +As of Pygments 0.7 you can also use the ``in`` operator to perform set tests: + +.. sourcecode:: pycon + + >>> from pygments.token import Comment + >>> Comment.Single in Comment + True + >>> Comment in Comment.Multi + False + +This can be useful in :doc:`filters ` and if you write lexers on your +own without using the base lexers. + +You can also split a token type into a hierarchy, and get the parent of it: + +.. sourcecode:: pycon + + >>> String.split() + [Token, Token.Literal, Token.Literal.String] + >>> String.parent + Token.Literal + +In principle, you can create an unlimited number of token types but nobody can +guarantee that a style would define style rules for a token type. Because of +that, Pygments proposes some global token types defined in the +`pygments.token.STANDARD_TYPES` dict. + +For some tokens aliases are already defined: + +.. 
sourcecode:: pycon + + >>> from pygments.token import String + >>> String + Token.Literal.String + +Inside the :mod:`pygments.token` module the following aliases are defined: + +============= ============================ ==================================== +`Text` `Token.Text` for any type of text data +`Whitespace` `Token.Text.Whitespace` for specially highlighted whitespace +`Error` `Token.Error` represents lexer errors +`Other` `Token.Other` special token for data not + matched by a parser (e.g. HTML + markup in PHP code) +`Keyword` `Token.Keyword` any kind of keywords +`Name` `Token.Name` variable/function names +`Literal` `Token.Literal` Any literals +`String` `Token.Literal.String` string literals +`Number` `Token.Literal.Number` number literals +`Operator` `Token.Operator` operators (``+``, ``not``...) +`Punctuation` `Token.Punctuation` punctuation (``[``, ``(``...) +`Comment` `Token.Comment` any kind of comments +`Generic` `Token.Generic` generic tokens (have a look at + the explanation below) +============= ============================ ==================================== + +The `Whitespace` token type is new in Pygments 0.8. It is used only by the +`VisibleWhitespaceFilter` currently. + +Normally you just create token types using the already defined aliases. For each +of those token aliases, a number of subtypes exists (excluding the special tokens +`Token.Text`, `Token.Error` and `Token.Other`) + +The `is_token_subtype()` function in the `pygments.token` module can be used to +test if a token type is a subtype of another (such as `Name.Tag` and `Name`). +(This is the same as ``Name.Tag in Name``. The overloaded `in` operator was newly +introduced in Pygments 0.7, the function still exists for backwards +compatibility.) + +With Pygments 0.7, it's also possible to convert strings to token types (for example +if you want to supply a token from the command line): + +.. sourcecode:: pycon + + >>> from pygments.token import String, string_to_tokentype + >>> string_to_tokentype("String") + Token.Literal.String + >>> string_to_tokentype("Token.Literal.String") + Token.Literal.String + >>> string_to_tokentype(String) + Token.Literal.String + + +Keyword Tokens +============== + +`Keyword` + For any kind of keyword (especially if it doesn't match any of the + subtypes of course). + +`Keyword.Constant` + For keywords that are constants (e.g. ``None`` in future Python versions). + +`Keyword.Declaration` + For keywords used for variable declaration (e.g. ``var`` in some programming + languages like JavaScript). + +`Keyword.Namespace` + For keywords used for namespace declarations (e.g. ``import`` in Python and + Java and ``package`` in Java). + +`Keyword.Pseudo` + For keywords that aren't really keywords (e.g. ``None`` in old Python + versions). + +`Keyword.Reserved` + For reserved keywords. + +`Keyword.Type` + For builtin types that can't be used as identifiers (e.g. ``int``, + ``char`` etc. in C). + + +Name Tokens +=========== + +`Name` + For any name (variable names, function names, classes). + +`Name.Attribute` + For all attributes (e.g. in HTML tags). + +`Name.Builtin` + Builtin names; names that are available in the global namespace. + +`Name.Builtin.Pseudo` + Builtin names that are implicit (e.g. ``self`` in Ruby, ``this`` in Java). + +`Name.Class` + Class names. Because no lexer can know if a name is a class or a function + or something else this token is meant for class declarations. + +`Name.Constant` + Token type for constants. 
In some languages you can recognise a token by the + way it's defined (the value after a ``const`` keyword for example). In + other languages constants are uppercase by definition (Ruby). + +`Name.Decorator` + Token type for decorators. Decorators are syntactic elements in the Python + language. Similar syntax elements exist in C# and Java. + +`Name.Entity` + Token type for special entities. (e.g. `` `` in HTML). + +`Name.Exception` + Token type for exception names (e.g. ``RuntimeError`` in Python). Some languages + define exceptions in the function signature (Java). You can highlight + the name of that exception using this token then. + +`Name.Function` + Token type for function names. + +`Name.Function.Magic` + same as `Name.Function` but for special function names that have an implicit use + in a language (e.g. ``__init__`` method in Python). + +`Name.Label` + Token type for label names (e.g. in languages that support ``goto``). + +`Name.Namespace` + Token type for namespaces. (e.g. import paths in Java/Python), names following + the ``module``/``namespace`` keyword in other languages. + +`Name.Other` + Other names. Normally unused. + +`Name.Tag` + Tag names (in HTML/XML markup or configuration files). + +`Name.Variable` + Token type for variables. Some languages have prefixes for variable names + (PHP, Ruby, Perl). You can highlight them using this token. + +`Name.Variable.Class` + same as `Name.Variable` but for class variables (also static variables). + +`Name.Variable.Global` + same as `Name.Variable` but for global variables (used in Ruby, for + example). + +`Name.Variable.Instance` + same as `Name.Variable` but for instance variables. + +`Name.Variable.Magic` + same as `Name.Variable` but for special variable names that have an implicit use + in a language (e.g. ``__doc__`` in Python). + + +Literals +======== + +`Literal` + For any literal (if not further defined). + +`Literal.Date` + for date literals (e.g. ``42d`` in Boo). + + +`String` + For any string literal. + +`String.Affix` + Token type for affixes that further specify the type of the string they're + attached to (e.g. the prefixes ``r`` and ``u8`` in ``r"foo"`` and ``u8"foo"``). + +`String.Backtick` + Token type for strings enclosed in backticks. + +`String.Char` + Token type for single characters (e.g. Java, C). + +`String.Delimiter` + Token type for delimiting identifiers in "heredoc", raw and other similar + strings (e.g. the word ``END`` in Perl code ``print <<'END';``). + +`String.Doc` + Token type for documentation strings (for example Python). + +`String.Double` + Double quoted strings. + +`String.Escape` + Token type for escape sequences in strings. + +`String.Heredoc` + Token type for "heredoc" strings (e.g. in Ruby or Perl). + +`String.Interpol` + Token type for interpolated parts in strings (e.g. ``#{foo}`` in Ruby). + +`String.Other` + Token type for any other strings (for example ``%q{foo}`` string constructs + in Ruby). + +`String.Regex` + Token type for regular expression literals (e.g. ``/foo/`` in JavaScript). + +`String.Single` + Token type for single quoted strings. + +`String.Symbol` + Token type for symbols (e.g. ``:foo`` in LISP or Ruby). + + +`Number` + Token type for any number literal. + +`Number.Bin` + Token type for binary literals (e.g. ``0b101010``). + +`Number.Float` + Token type for float literals (e.g. ``42.0``). + +`Number.Hex` + Token type for hexadecimal number literals (e.g. ``0xdeadbeef``). + +`Number.Integer` + Token type for integer literals (e.g. ``42``). 
+ +`Number.Integer.Long` + Token type for long integer literals (e.g. ``42L`` in Python). + +`Number.Oct` + Token type for octal literals. + + +Operators +========= + +`Operator` + For any punctuation operator (e.g. ``+``, ``-``). + +`Operator.Word` + For any operator that is a word (e.g. ``not``). + + +Punctuation +=========== + +.. versionadded:: 0.7 + +`Punctuation` + For any punctuation which is not an operator (e.g. ``[``, ``(``...) + + +Comments +======== + +`Comment` + Token type for any comment. + +`Comment.Hashbang` + Token type for hashbang comments (i.e. first lines of files that start with + ``#!``). + +`Comment.Multiline` + Token type for multiline comments. + +`Comment.Preproc` + Token type for preprocessor comments (also ```. + +.. versionadded:: 0.7 + The formatters now also accept an `outencoding` option which will override + the `encoding` option if given. This makes it possible to use a single + options dict with lexers and formatters, and still have different input and + output encodings. + +.. _chardet: https://chardet.github.io/ diff --git a/doc/_build/html/_sources/download.rst.txt b/doc/_build/html/_sources/download.rst.txt new file mode 100644 index 0000000..975c41b --- /dev/null +++ b/doc/_build/html/_sources/download.rst.txt @@ -0,0 +1,39 @@ +Download and installation +========================= + +The current release is version |version|. + +Packaged versions +----------------- + +You can download it `from the Python Package Index +`_. For installation of packages from +PyPI, we recommend `Pip `_, which works on all +major platforms. + +Under Linux, most distributions include a package for Pygments, usually called +``pygments`` or ``python-pygments``. You can install it with the package +manager as usual. + +Development sources +------------------- + +We're using the Git version control system. You can get the development source +using this command:: + + git clone https://github.com/pygments/pygments + +Development takes place at `GitHub `_. + +The latest changes in the development source code are listed in the `changelog +`_. + +.. Documentation + ------------- + +.. XXX todo + + You can download the documentation either as + a bunch of rst files from the Git repository, see above, or + as a tar.gz containing rendered HTML files:

+
+      pygmentsdocs.tar.gz
+
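Once Pygments is installed by any of the routes above, a quick smoke test from the interpreter confirms that the package is importable and working. The following is an illustrative sketch (not part of the packaged docs); it uses only the documented top-level API, ``pygments.highlight`` with a lexer and a formatter::

    # Print the installed version, then render one line of Python
    # with ANSI colour codes in the terminal.
    import pygments
    from pygments import highlight
    from pygments.lexers import get_lexer_by_name
    from pygments.formatters import TerminalFormatter

    print(pygments.__version__)          # e.g. 2.5.1

    code = 'print("hello, world")'
    print(highlight(code, get_lexer_by_name('python'), TerminalFormatter()))

If the second ``print`` shows the snippet in colour, the install is good; swapping ``TerminalFormatter`` for ``HtmlFormatter`` produces HTML markup instead.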
diff --git a/doc/_build/html/_sources/faq.rst.txt b/doc/_build/html/_sources/faq.rst.txt new file mode 100644 index 0000000..108cef4 --- /dev/null +++ b/doc/_build/html/_sources/faq.rst.txt @@ -0,0 +1,140 @@ +:orphan: + +Pygments FAQ +============= + +What is Pygments? +----------------- + +Pygments is a syntax highlighting engine written in Python. That means it will +take source code (or other markup) in a supported language and output a +processed version (in different formats) containing syntax highlighting markup. + +Its features include: + +* a wide range of common :doc:`languages and markup formats ` is supported +* new languages and formats are added easily +* a number of output formats is available, including: + + - HTML + - ANSI sequences (console output) + - LaTeX + - RTF + +* it is usable as a command-line tool and as a library +* parsing and formatting are fast + +Pygments is licensed under the BSD license. + +Where does the name Pygments come from? +--------------------------------------- + +*Py* of course stands for Python, while *pigments* are used for coloring paint, +and in this case, source code! + +What are the system requirements? +--------------------------------- + +Pygments only needs a standard Python install: version 2.7 or higher for Python 2, +or version 3.5 or higher for Python 3. No additional libraries are needed. + +How can I use Pygments? +----------------------- + +Pygments is usable as a command-line tool as well as a library. + +From the command line, usage looks like this (assuming the pygmentize script is +properly installed):: + + pygmentize -f html /path/to/file.py + +This will print an HTML-highlighted version of /path/to/file.py to standard output. + +For complete help, please run ``pygmentize -h``. + +Usage as a library is thoroughly demonstrated in the Documentation section. + +How do I make a new style? +-------------------------- + +Please see the :doc:`documentation on styles `. + +How can I report a bug or suggest a feature? +-------------------------------------------- + +Please report bugs and feature wishes in the tracker at GitHub. + +You can also e-mail the authors; see the contact details. + +I want support for this language! +--------------------------------- + +Instead of waiting for others to include language support, why not write it +yourself? All you have to know is :doc:`outlined in the docs +`. + +Can I use Pygments for programming language processing? +------------------------------------------------------- + +The Pygments lexing machinery is quite powerful and can be used to build lexers for +basically all languages. However, parsing them is not possible, though some +lexers go a few steps in this direction in order to, e.g., highlight function names +differently. + +Also, error reporting is not within the scope of Pygments. It focuses on correctly +highlighting syntactically valid documents, not on finding and compensating for errors. + +Who uses Pygments? +------------------ + +This is an (incomplete) list of projects and sites known to use the Pygments highlighter.
+ +* `Wikipedia `_ +* `BitBucket `_, a Mercurial and Git hosting site +* `The Sphinx documentation builder `_, for embedded source examples +* `rst2pdf `_, a reStructuredText to PDF converter +* `Codecov `_, a code coverage CI service +* `Trac `_, the universal project management tool +* `AsciiDoc `_, a text-based documentation generator +* `ActiveState Code `_, the Python Cookbook successor +* `ViewVC `_, a web-based version control repository browser +* `BzrFruit `_, a Bazaar branch viewer +* `QBzr `_, a cross-platform Qt-based GUI front end for Bazaar +* `Review Board `_, a collaborative code reviewing tool +* `Diamanda `_, a Django powered wiki system with support for Pygments +* `Progopedia `_ (`English `_), + an encyclopedia of programming languages +* `Bruce `_, a reStructuredText presentation tool +* `PIDA `_, a universal IDE written in Python +* `BPython `_, a curses-based intelligent Python shell +* `PuDB `_, a console Python debugger +* `XWiki `_, a wiki-based development framework in Java, using Jython +* `roux `_, a script for running R scripts + and creating beautiful output including graphs +* `hurl `_, a web service for making HTTP requests +* `wxHTMLPygmentizer `_ is + a GUI utility, used to make code-colorization easier +* `Postmarkup `_, a BBCode to XHTML generator +* `WpPygments `_, and `WPygments + `_, highlighter plugins for WordPress +* `Siafoo `_, a tool for sharing and storing useful code and programming experience +* `D source `_, a community for the D programming language +* `dpaste.com `_, another Django pastebin +* `Django snippets `_, a pastebin for Django code +* `Fayaa `_, a Chinese pastebin +* `Incollo.com `_, a free collaborative debugging tool +* `PasteBox `_, a pastebin focused on privacy +* `hilite.me `_, a site to highlight code snippets +* `patx.me `_, a pastebin +* `Fluidic `_, an experiment in + integrating shells with a GUI +* `pygments.rb `_, a pygments wrapper for Ruby +* `Clygments `_, a pygments wrapper for + Clojure +* `PHPygments `_, a pygments wrapper for PHP +* `Spyder `_, the Scientific Python Development + Environment, uses pygments for the multi-language syntax highlighting in its + `editor `_. + +If you have a project or web site using Pygments, drop me a line, and I'll add a +link here. diff --git a/doc/_build/html/_sources/index.rst.txt b/doc/_build/html/_sources/index.rst.txt new file mode 100644 index 0000000..d89277e --- /dev/null +++ b/doc/_build/html/_sources/index.rst.txt @@ -0,0 +1,49 @@ +Welcome! +======== + +This is the home of Pygments. It is a generic syntax highlighter suitable for +use in code hosting, forums, wikis or other applications that need to prettify +source code. Highlights are: + +* a wide range of over 300 languages and other text formats is supported +* special attention is paid to details that increase highlighting quality +* support for new languages and formats are added easily; most languages use a + simple regex-based lexing mechanism +* a number of output formats is available, among them HTML, RTF, LaTeX and ANSI + sequences +* it is usable as a command-line tool and as a library +* ... and it highlights even Perl 6! + +Read more in the :doc:`FAQ list ` or the :doc:`documentation `, +or `download the latest release `_. + +.. _contribute: + +Contribute +---------- + +Like every open-source project, we are always looking for volunteers to help us +with programming. Python knowledge is required, but don't fear: Python is a very +clear and easy to learn language. + +Development takes place on `GitHub `_. 
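For volunteers who want to dig into lexers, the quickest way to understand one is to watch it emit tokens. The sketch below is illustrative only (the snippet and variable names are made up for the example); it runs a small piece of Python through the top-level ``pygments.lex`` helper and prints each token type from the token catalogue documented earlier::

    # Show the (token type, text) pairs a lexer produces.
    from pygments import lex
    from pygments.lexers import PythonLexer

    code = 'def answer():\n    return 42\n'

    for token_type, value in lex(code, PythonLexer()):
        if value.strip():                      # skip pure-whitespace tokens
            print('{0:30} {1!r}'.format(str(token_type), value))

    # Expected kinds of output (abridged):
    #   Token.Keyword                  'def'
    #   Token.Name.Function            'answer'
    #   Token.Keyword                  'return'
    #   Token.Literal.Number.Integer   '42'

Every type printed is one of the entries from the token reference, which is exactly what styles and formatters key on.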
+ +If you found a bug, just open a ticket in the GitHub tracker. Be sure to log +in to be notified when the issue is fixed -- development is not fast-paced as +the library is quite stable. You can also send an e-mail to the developers, see +below. + +The authors +----------- + +Pygments is maintained by **Georg Brandl**, e-mail address *georg*\ *@*\ *python.org* +and **Matthäus Chajdas**. + +Many lexers and fixes have been contributed by **Armin Ronacher**, the rest of +the `Pocoo `_ team and **Tim Hatch**. + +.. toctree:: + :maxdepth: 1 + :hidden: + + docs/index diff --git a/doc/_build/html/_sources/languages.rst.txt b/doc/_build/html/_sources/languages.rst.txt new file mode 100644 index 0000000..a91664c --- /dev/null +++ b/doc/_build/html/_sources/languages.rst.txt @@ -0,0 +1,176 @@ +:orphan: + +Supported languages +=================== + +Pygments supports an ever-growing range of languages. Watch this space... + +Programming languages +--------------------- + +* ActionScript +* Ada +* ANTLR +* AppleScript +* Assembly (various) +* Asymptote +* `Augeas `_ +* Awk +* BBC Basic +* Befunge +* `Boa `_ +* Boo +* BrainFuck +* C, C++ +* C# +* `Charm++ CI `_ +* Clojure +* CoffeeScript +* ColdFusion +* Common Lisp +* Coq +* Cryptol (incl. Literate Cryptol) +* `Crystal `_ +* `Cython `_ +* `D `_ +* Dart +* DCPU-16 +* Delphi +* Dylan +* `Elm `_ +* Email +* Erlang +* `Ezhil `_ Ezhil - A Tamil programming language +* Factor +* Fancy +* `Fennel `_ +* `FloScript `_ +* Fortran +* `FreeFEM++ `_ +* F# +* GAP +* Gherkin (Cucumber) +* GL shaders +* Groovy +* `Haskell `_ (incl. Literate Haskell) +* HLSL +* `HSpec `_ +* IDL +* Io +* Java +* JavaScript +* Lasso +* LLVM +* Logtalk +* `Lua `_ +* Matlab +* MiniD +* Modelica +* Modula-2 +* MuPad +* Nemerle +* Nimrod +* Notmuch +* Objective-C +* Objective-J +* Octave +* OCaml +* PHP +* `Perl 5 `_ and `Perl 6 `_ +* `Pony `_ +* PovRay +* PostScript +* PowerShell +* Prolog +* `Python `_ 2.x and 3.x (incl. console sessions and tracebacks) +* `REBOL `_ +* `Red `_ +* Redcode +* `Ruby `_ (incl. irb sessions) +* Rust +* S, S-Plus, R +* Scala +* `Scdoc `_ +* Scheme +* Scilab +* `SGF `_ +* `Slash `_ +* `Slurm `_ +* Smalltalk +* SNOBOL +* `Solidity `_ +* Tcl +* `Tera Term language `_ +* `TOML `_ +* Vala +* Verilog +* VHDL +* Visual Basic.NET +* Visual FoxPro +* XQuery +* `Zeek `_ +* Zephir +* `Zig `_ + +Template languages +------------------ + +* Cheetah templates +* `Django `_ / `Jinja + `_ templates +* ERB (Ruby templating) +* `Genshi `_ (the Trac template language) +* JSP (Java Server Pages) +* `Myghty `_ (the HTML::Mason based framework) +* `Mako `_ (the Myghty successor) +* `Smarty `_ templates (PHP templating) +* Tea + +Other markup +------------ + +* Apache config files +* Bash shell scripts +* BBCode +* CMake +* CSS +* Debian control files +* Diff files +* DTD +* Gettext catalogs +* Gnuplot script +* Groff markup +* HTML +* HTTP sessions +* INI-style config files +* IRC logs (irssi style) +* Lighttpd config files +* Makefiles +* MoinMoin/Trac Wiki markup +* MySQL +* Nginx config files +* POV-Ray scenes +* Ragel +* Redcode +* ReST +* Robot Framework +* RPM spec files +* SQL, also MySQL, SQLite +* Squid configuration +* TeX +* tcsh +* Vim Script +* Windows batch files +* XML +* XSLT +* YAML + +... that's all? +--------------- + +Well, why not write your own? Contributing to Pygments is easy and fun. Take a +look at the :doc:`docs on lexer development `. Pull +requests are welcome on `GitHub `. + +Note: the languages listed here are supported in the development version. 
The +latest release may lack a few of them. diff --git a/doc/_build/html/_static/basic.css b/doc/_build/html/_static/basic.css new file mode 100644 index 0000000..ea6972d --- /dev/null +++ b/doc/_build/html/_static/basic.css @@ -0,0 +1,764 @@ +/* + * basic.css + * ~~~~~~~~~ + * + * Sphinx stylesheet -- basic theme. + * + * :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 230px; + margin-left: -100%; + font-size: 90%; + word-wrap: break-word; + overflow-wrap : break-word; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar #searchbox form.search { + overflow: hidden; +} + +div.sphinxsidebar #searchbox input[type="text"] { + float: left; + width: 80%; + padding: 0.25em; + box-sizing: border-box; +} + +div.sphinxsidebar #searchbox input[type="submit"] { + float: left; + width: 20%; + border-left: none; + padding: 0.25em; + box-sizing: border-box; +} + + +img { + border: 0; + max-width: 100%; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin: 10px 0 0 20px; + padding: 0; +} + +ul.search li { + padding: 5px 0 5px 20px; + background-image: url(file.png); + background-repeat: no-repeat; + background-position: 0 7px; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li div.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; + margin-left: auto; + margin-right: auto; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable { + width: 100%; +} + +table.indextable td { + text-align: left; + vertical-align: top; +} + +table.indextable ul { + margin-top: 0; + margin-bottom: 0; + list-style-type: none; +} + +table.indextable > tbody > tr > td > ul { + padding-left: 0em; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +div.modindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + 
+div.genindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +/* -- domain module index --------------------------------------------------- */ + +table.modindextable td { + padding: 2px; + border-collapse: collapse; +} + +/* -- general body styles --------------------------------------------------- */ + +div.body { + min-width: 450px; + max-width: 800px; +} + +div.body p, div.body dd, div.body li, div.body blockquote { + -moz-hyphens: auto; + -ms-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; +} + +a.headerlink { + visibility: hidden; +} + +a.brackets:before, +span.brackets > a:before{ + content: "["; +} + +a.brackets:after, +span.brackets > a:after { + content: "]"; +} + +h1:hover > a.headerlink, +h2:hover > a.headerlink, +h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > a.headerlink, +dt:hover > a.headerlink, +caption:hover > a.headerlink, +p.caption:hover > a.headerlink, +div.code-block-caption:hover > a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.first { + margin-top: 0 !important; +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +img.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; +} + +img.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; +} + +img.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +img.align-default, .figure.align-default { + display: block; + margin-left: auto; + margin-right: auto; +} + +.align-left { + text-align: left; +} + +.align-center { + text-align: center; +} + +.align-default { + text-align: center; +} + +.align-right { + text-align: right; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar { + margin: 0 0 0.5em 1em; + border: 1px solid #ddb; + padding: 7px 7px 0 7px; + background-color: #ffe; + width: 40%; + float: right; +} + +p.sidebar-title { + font-weight: bold; +} + +/* -- topics ---------------------------------------------------------------- */ + +div.topic { + border: 1px solid #ccc; + padding: 7px 7px 0 7px; + margin: 10px 0 10px 0; +} + +p.topic-title { + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt { + font-weight: bold; +} + +div.admonition dl { + margin-bottom: 0; +} + +p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + border: 0; + border-collapse: collapse; +} + +table.align-center { + margin-left: auto; + margin-right: auto; +} + +table.align-default { + margin-left: auto; + margin-right: auto; +} + +table caption span.caption-number { + font-style: italic; +} + +table caption span.caption-text { +} + +table.docutils td, table.docutils th { + padding: 1px 8px 1px 5px; + border-top: 0; + border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +table.footnote td, table.footnote th { + border: 0 !important; +} + +th { + text-align: left; + padding-right: 5px; +} + 
+table.citation { + border-left: solid 1px gray; + margin-left: 1px; +} + +table.citation td { + border-bottom: none; +} + +th > p:first-child, +td > p:first-child { + margin-top: 0px; +} + +th > p:last-child, +td > p:last-child { + margin-bottom: 0px; +} + +/* -- figures --------------------------------------------------------------- */ + +div.figure { + margin: 0.5em; + padding: 0.5em; +} + +div.figure p.caption { + padding: 0.3em; +} + +div.figure p.caption span.caption-number { + font-style: italic; +} + +div.figure p.caption span.caption-text { +} + +/* -- field list styles ----------------------------------------------------- */ + +table.field-list td, table.field-list th { + border: 0 !important; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +/* -- hlist styles ---------------------------------------------------------- */ + +table.hlist td { + vertical-align: top; +} + + +/* -- other body styles ----------------------------------------------------- */ + +ol.arabic { + list-style: decimal; +} + +ol.loweralpha { + list-style: lower-alpha; +} + +ol.upperalpha { + list-style: upper-alpha; +} + +ol.lowerroman { + list-style: lower-roman; +} + +ol.upperroman { + list-style: upper-roman; +} + +li > p:first-child { + margin-top: 0px; +} + +li > p:last-child { + margin-bottom: 0px; +} + +dl.footnote > dt, +dl.citation > dt { + float: left; +} + +dl.footnote > dd, +dl.citation > dd { + margin-bottom: 0em; +} + +dl.footnote > dd:after, +dl.citation > dd:after { + content: ""; + clear: both; +} + +dl.field-list { + display: grid; + grid-template-columns: fit-content(30%) auto; +} + +dl.field-list > dt { + font-weight: bold; + word-break: break-word; + padding-left: 0.5em; + padding-right: 5px; +} + +dl.field-list > dt:after { + content: ":"; +} + +dl.field-list > dd { + padding-left: 0.5em; + margin-top: 0em; + margin-left: 0em; + margin-bottom: 0em; +} + +dl { + margin-bottom: 15px; +} + +dd > p:first-child { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +dt:target, span.highlighted { + background-color: #fbe54e; +} + +rect.highlighted { + fill: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; + font-size: 1.1em; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa; +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block .line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +.guilabel, .menuselection { + font-family: sans-serif; +} + +.accelerator { + text-decoration: underline; +} + +.classifier { + font-style: oblique; +} + +.classifier:before { + font-style: normal; + margin: 0.5em; + content: ":"; +} + +abbr, acronym { + border-bottom: dotted 1px; + cursor: help; +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; + overflow-y: hidden; /* fixes display issues on Chrome browsers */ +} + +span.pre { + -moz-hyphens: none; + -ms-hyphens: none; + -webkit-hyphens: none; + hyphens: none; +} + +td.linenos pre { + padding: 5px 0px; + border: 0; + background-color: transparent; + color: #aaa; +} + 
+table.highlighttable { + margin-left: 0.5em; +} + +table.highlighttable td { + padding: 0 0.5em 0 0.5em; +} + +div.code-block-caption { + padding: 2px 5px; + font-size: small; +} + +div.code-block-caption code { + background-color: transparent; +} + +div.code-block-caption + div > div.highlight > pre { + margin-top: 0; +} + +div.code-block-caption span.caption-number { + padding: 0.1em 0.3em; + font-style: italic; +} + +div.code-block-caption span.caption-text { +} + +div.literal-block-wrapper { + padding: 1em 1em 0; +} + +div.literal-block-wrapper div.highlight { + margin: 0; +} + +code.descname { + background-color: transparent; + font-weight: bold; + font-size: 1.2em; +} + +code.descclassname { + background-color: transparent; +} + +code.xref, a code { + background-color: transparent; + font-weight: bold; +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + background-color: transparent; +} + +.viewcode-link { + float: right; +} + +.viewcode-back { + float: right; + font-family: sans-serif; +} + +div.viewcode-block:target { + margin: -1px -10px; + padding: 0 10px; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +span.eqno a.headerlink { + position: relative; + left: 0px; + z-index: 1; +} + +div.math:hover a.headerlink { + visibility: visible; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } +} \ No newline at end of file diff --git a/doc/_build/html/_static/bodybg.png b/doc/_build/html/_static/bodybg.png new file mode 100644 index 0000000..46892b8 Binary files /dev/null and b/doc/_build/html/_static/bodybg.png differ diff --git a/doc/_build/html/_static/demo.css b/doc/_build/html/_static/demo.css new file mode 100644 index 0000000..9344291 --- /dev/null +++ b/doc/_build/html/_static/demo.css @@ -0,0 +1,38 @@ +#try { + background-color: #f6f6f6; + border-radius: 0; + border: 1px solid #ccc; + margin-top: 15px; + padding: 10px 15px 5px 10px; + position: relative; +} + +#try h2 { + margin-top: 0; +} + +#try textarea { + border: 1px solid #999; + padding: 2px; + width: 100%; + min-height: 150px; +} + +#hlcode pre { + background-color: transparent; + border-radius: 0; +} + +#loading { + position: absolute; + top: 0; + left: 0; + width: 100%; + height: 100%; + margin: auto auto; + background-color: #cccccccc; + display: flex; + flex-direction: column; + justify-content: center; + text-align: center; +} diff --git a/doc/_build/html/_static/demo.js b/doc/_build/html/_static/demo.js new file mode 100644 index 0000000..f538492 --- /dev/null +++ b/doc/_build/html/_static/demo.js @@ -0,0 +1,100 @@ +languagePluginLoader.then(() => { + // pyodide is now ready to use... 
+ pyodide.loadPackage('Pygments').then(() => { + pyodide.runPython('import pygments.lexers, pygments.formatters.html, pygments.styles'); + + var lexerlist = pyodide.runPython('list(pygments.lexers.get_all_lexers())'); + var sel = document.getElementById("lang"); + for (lex of lexerlist) { + var opt = document.createElement("option"); + opt.text = lex[0]; + opt.value = lex[1][0]; + sel.add(opt); + } + + var stylelist = pyodide.runPython('list(pygments.styles.get_all_styles())'); + var sel = document.getElementById("style"); + for (sty of stylelist) { + if (sty != "default") { + var opt = document.createElement("option"); + opt.text = sty; + opt.value = sty; + sel.add(opt); + } + } + + document.getElementById("hlbtn").disabled = false; + document.getElementById("loading").style.display = "none"; + }); +}); + +function new_file() { + pyodide.globals['fname'] = document.getElementById("file").files[0].name; + var alias = pyodide.runPython('pygments.lexers.find_lexer_class_for_filename(fname).aliases[0]'); + var sel = document.getElementById("lang"); + for (var i = 0; i < sel.length; i++) { + if (sel.options[i].value == alias) { + sel.selectedIndex = i; + reset_err_hl(); + break; + } + } +} + +function reset_err_hl() { + document.getElementById("aroundlang").style.backgroundColor = null; +} + +function highlight() { + var select = document.getElementById("lang"); + var alias = select.options.item(select.selectedIndex).value + + if (alias == "") { + document.getElementById("aroundlang").style.backgroundColor = "#ffcccc"; + return; + } + pyodide.globals['alias'] = alias; + + var select = document.getElementById("style"); + pyodide.globals['style'] = select.options.item(select.selectedIndex).value; + + pyodide.runPython('lexer = pygments.lexers.get_lexer_by_name(alias)'); + pyodide.runPython('fmter = pygments.formatters.html.HtmlFormatter(noclasses=True, style=style)'); + + var file = document.getElementById("file").files[0]; + if (file) { + file.arrayBuffer().then(function(buf) { + pyodide.globals['code_mem'] = buf; + pyodide.runPython('code = bytes(code_mem)'); + highlight_now(); + }); + } else { + pyodide.globals['code'] = document.getElementById("code").value; + highlight_now(); + } +} + +function highlight_now() { + var out = document.getElementById("hlcode"); + out.innerHTML = pyodide.runPython('pygments.highlight(code, lexer, fmter)'); + document.location.hash = "#try"; + document.getElementById("hlcodedl").style.display = "block"; +} + +function download_code() { + var filename = "highlighted.html"; + var hlcode = document.getElementById("hlcode").innerHTML; + var blob = new Blob([hlcode], {type: 'text/html'}); + if (window.navigator.msSaveOrOpenBlob) { + window.navigator.msSaveBlob(blob, filename); + } + else{ + var elem = window.document.createElement('a'); + elem.href = window.URL.createObjectURL(blob); + elem.download = filename; + document.body.appendChild(elem); + elem.click(); + document.body.removeChild(elem); + window.URL.revokeObjectURL(elem.href); + } +} diff --git a/doc/_build/html/_static/docbg.png b/doc/_build/html/_static/docbg.png new file mode 100644 index 0000000..13e61f3 Binary files /dev/null and b/doc/_build/html/_static/docbg.png differ diff --git a/doc/_build/html/_static/doctools.js b/doc/_build/html/_static/doctools.js new file mode 100644 index 0000000..b33f87f --- /dev/null +++ b/doc/_build/html/_static/doctools.js @@ -0,0 +1,314 @@ +/* + * doctools.js + * ~~~~~~~~~~~ + * + * Sphinx JavaScript utilities for all documentation. 
+ * + * :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +/** + * select a different prefix for underscore + */ +$u = _.noConflict(); + +/** + * make the code below compatible with browsers without + * an installed firebug like debugger +if (!window.console || !console.firebug) { + var names = ["log", "debug", "info", "warn", "error", "assert", "dir", + "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace", + "profile", "profileEnd"]; + window.console = {}; + for (var i = 0; i < names.length; ++i) + window.console[names[i]] = function() {}; +} + */ + +/** + * small helper function to urldecode strings + */ +jQuery.urldecode = function(x) { + return decodeURIComponent(x).replace(/\+/g, ' '); +}; + +/** + * small helper function to urlencode strings + */ +jQuery.urlencode = encodeURIComponent; + +/** + * This function returns the parsed url parameters of the + * current request. Multiple values per key are supported, + * it will always return arrays of strings for the value parts. + */ +jQuery.getQueryParameters = function(s) { + if (typeof s === 'undefined') + s = document.location.search; + var parts = s.substr(s.indexOf('?') + 1).split('&'); + var result = {}; + for (var i = 0; i < parts.length; i++) { + var tmp = parts[i].split('=', 2); + var key = jQuery.urldecode(tmp[0]); + var value = jQuery.urldecode(tmp[1]); + if (key in result) + result[key].push(value); + else + result[key] = [value]; + } + return result; +}; + +/** + * highlight a given string on a jquery object by wrapping it in + * span elements with the given class name. + */ +jQuery.fn.highlightText = function(text, className) { + function highlight(node, addItems) { + if (node.nodeType === 3) { + var val = node.nodeValue; + var pos = val.toLowerCase().indexOf(text); + if (pos >= 0 && + !jQuery(node.parentNode).hasClass(className) && + !jQuery(node.parentNode).hasClass("nohighlight")) { + var span; + var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.className = className; + } + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + node.parentNode.insertBefore(span, node.parentNode.insertBefore( + document.createTextNode(val.substr(pos + text.length)), + node.nextSibling)); + node.nodeValue = val.substr(0, pos); + if (isInSVG) { + var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); + var bbox = node.parentElement.getBBox(); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute('class', className); + addItems.push({ + "parent": node.parentNode, + "target": rect}); + } + } + } + else if (!jQuery(node).is("button, select, textarea")) { + jQuery.each(node.childNodes, function() { + highlight(this, addItems); + }); + } + } + var addItems = []; + var result = this.each(function() { + highlight(this, addItems); + }); + for (var i = 0; i < addItems.length; ++i) { + jQuery(addItems[i].parent).before(addItems[i].target); + } + return result; +}; + +/* + * backward compatibility for jQuery.browser + * This will be supported until firefox bug is fixed. 
+ */ +if (!jQuery.browser) { + jQuery.uaMatch = function(ua) { + ua = ua.toLowerCase(); + + var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || + /(webkit)[ \/]([\w.]+)/.exec(ua) || + /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || + /(msie) ([\w.]+)/.exec(ua) || + ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || + []; + + return { + browser: match[ 1 ] || "", + version: match[ 2 ] || "0" + }; + }; + jQuery.browser = {}; + jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; +} + +/** + * Small JavaScript module for the documentation. + */ +var Documentation = { + + init : function() { + this.fixFirefoxAnchorBug(); + this.highlightSearchWords(); + this.initIndexTable(); + if (DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) { + this.initOnKeyListeners(); + } + }, + + /** + * i18n support + */ + TRANSLATIONS : {}, + PLURAL_EXPR : function(n) { return n === 1 ? 0 : 1; }, + LOCALE : 'unknown', + + // gettext and ngettext don't access this so that the functions + // can safely bound to a different name (_ = Documentation.gettext) + gettext : function(string) { + var translated = Documentation.TRANSLATIONS[string]; + if (typeof translated === 'undefined') + return string; + return (typeof translated === 'string') ? translated : translated[0]; + }, + + ngettext : function(singular, plural, n) { + var translated = Documentation.TRANSLATIONS[singular]; + if (typeof translated === 'undefined') + return (n == 1) ? singular : plural; + return translated[Documentation.PLURALEXPR(n)]; + }, + + addTranslations : function(catalog) { + for (var key in catalog.messages) + this.TRANSLATIONS[key] = catalog.messages[key]; + this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')'); + this.LOCALE = catalog.locale; + }, + + /** + * add context elements like header anchor links + */ + addContextElements : function() { + $('div[id] > :header:first').each(function() { + $('\u00B6'). + attr('href', '#' + this.id). + attr('title', _('Permalink to this headline')). + appendTo(this); + }); + $('dt[id]').each(function() { + $('\u00B6'). + attr('href', '#' + this.id). + attr('title', _('Permalink to this definition')). + appendTo(this); + }); + }, + + /** + * workaround a firefox stupidity + * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075 + */ + fixFirefoxAnchorBug : function() { + if (document.location.hash && $.browser.mozilla) + window.setTimeout(function() { + document.location.href += ''; + }, 10); + }, + + /** + * highlight the search words provided in the url in the text + */ + highlightSearchWords : function() { + var params = $.getQueryParameters(); + var terms = (params.highlight) ? 
params.highlight[0].split(/\s+/) : []; + if (terms.length) { + var body = $('div.body'); + if (!body.length) { + body = $('body'); + } + window.setTimeout(function() { + $.each(terms, function() { + body.highlightText(this.toLowerCase(), 'highlighted'); + }); + }, 10); + $('') + .appendTo($('#searchbox')); + } + }, + + /** + * init the domain index toggle buttons + */ + initIndexTable : function() { + var togglers = $('img.toggler').click(function() { + var src = $(this).attr('src'); + var idnum = $(this).attr('id').substr(7); + $('tr.cg-' + idnum).toggle(); + if (src.substr(-9) === 'minus.png') + $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); + else + $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); + }).css('display', ''); + if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) { + togglers.click(); + } + }, + + /** + * helper function to hide the search marks again + */ + hideSearchWords : function() { + $('#searchbox .highlight-link').fadeOut(300); + $('span.highlighted').removeClass('highlighted'); + }, + + /** + * make the url absolute + */ + makeURL : function(relativeURL) { + return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; + }, + + /** + * get the current relative url + */ + getCurrentURL : function() { + var path = document.location.pathname; + var parts = path.split(/\//); + $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { + if (this === '..') + parts.pop(); + }); + var url = parts.join('/'); + return path.substring(url.lastIndexOf('/') + 1, path.length - 1); + }, + + initOnKeyListeners: function() { + $(document).keyup(function(event) { + var activeElementType = document.activeElement.tagName; + // don't navigate when in search box or textarea + if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT') { + switch (event.keyCode) { + case 37: // left + var prevHref = $('link[rel="prev"]').prop('href'); + if (prevHref) { + window.location.href = prevHref; + return false; + } + case 39: // right + var nextHref = $('link[rel="next"]').prop('href'); + if (nextHref) { + window.location.href = nextHref; + return false; + } + } + } + }); + } +}; + +// quick alias for translations +_ = Documentation.gettext; + +$(document).ready(function() { + Documentation.init(); +}); diff --git a/doc/_build/html/_static/documentation_options.js b/doc/_build/html/_static/documentation_options.js new file mode 100644 index 0000000..6ab1660 --- /dev/null +++ b/doc/_build/html/_static/documentation_options.js @@ -0,0 +1,10 @@ +var DOCUMENTATION_OPTIONS = { + URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), + VERSION: '2.4.2', + LANGUAGE: 'None', + COLLAPSE_INDEX: false, + FILE_SUFFIX: '.html', + HAS_SOURCE: true, + SOURCELINK_SUFFIX: '.txt', + NAVIGATION_WITH_KEYS: false +}; \ No newline at end of file diff --git a/doc/_build/html/_static/favicon.ico b/doc/_build/html/_static/favicon.ico new file mode 100644 index 0000000..777f617 Binary files /dev/null and b/doc/_build/html/_static/favicon.ico differ diff --git a/doc/_build/html/_static/file.png b/doc/_build/html/_static/file.png new file mode 100644 index 0000000..a858a41 Binary files /dev/null and b/doc/_build/html/_static/file.png differ diff --git a/doc/_build/html/_static/github.png b/doc/_build/html/_static/github.png new file mode 100644 index 0000000..5d146ad Binary files /dev/null and b/doc/_build/html/_static/github.png differ diff --git a/doc/_build/html/_static/jquery-3.4.1.js 
b/doc/_build/html/_static/jquery-3.4.1.js new file mode 100644 index 0000000..773ad95 --- /dev/null +++ b/doc/_build/html/_static/jquery-3.4.1.js @@ -0,0 +1,10598 @@ +/*! + * jQuery JavaScript Library v3.4.1 + * https://jquery.com/ + * + * Includes Sizzle.js + * https://sizzlejs.com/ + * + * Copyright JS Foundation and other contributors + * Released under the MIT license + * https://jquery.org/license + * + * Date: 2019-05-01T21:04Z + */ +( function( global, factory ) { + + "use strict"; + + if ( typeof module === "object" && typeof module.exports === "object" ) { + + // For CommonJS and CommonJS-like environments where a proper `window` + // is present, execute the factory and get jQuery. + // For environments that do not have a `window` with a `document` + // (such as Node.js), expose a factory as module.exports. + // This accentuates the need for the creation of a real `window`. + // e.g. var jQuery = require("jquery")(window); + // See ticket #14549 for more info. + module.exports = global.document ? + factory( global, true ) : + function( w ) { + if ( !w.document ) { + throw new Error( "jQuery requires a window with a document" ); + } + return factory( w ); + }; + } else { + factory( global ); + } + +// Pass this if window is not defined yet +} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { + +// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 +// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode +// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common +// enough that all such attempts are guarded in a try block. +"use strict"; + +var arr = []; + +var document = window.document; + +var getProto = Object.getPrototypeOf; + +var slice = arr.slice; + +var concat = arr.concat; + +var push = arr.push; + +var indexOf = arr.indexOf; + +var class2type = {}; + +var toString = class2type.toString; + +var hasOwn = class2type.hasOwnProperty; + +var fnToString = hasOwn.toString; + +var ObjectFunctionString = fnToString.call( Object ); + +var support = {}; + +var isFunction = function isFunction( obj ) { + + // Support: Chrome <=57, Firefox <=52 + // In some browsers, typeof returns "function" for HTML elements + // (i.e., `typeof document.createElement( "object" ) === "function"`). + // We don't want to classify *any* DOM node as a function. + return typeof obj === "function" && typeof obj.nodeType !== "number"; + }; + + +var isWindow = function isWindow( obj ) { + return obj != null && obj === obj.window; + }; + + + + + var preservedScriptAttributes = { + type: true, + src: true, + nonce: true, + noModule: true + }; + + function DOMEval( code, node, doc ) { + doc = doc || document; + + var i, val, + script = doc.createElement( "script" ); + + script.text = code; + if ( node ) { + for ( i in preservedScriptAttributes ) { + + // Support: Firefox 64+, Edge 18+ + // Some browsers don't support the "nonce" property on scripts. + // On the other hand, just using `getAttribute` is not enough as + // the `nonce` attribute is reset to an empty string whenever it + // becomes browsing-context connected. + // See https://github.com/whatwg/html/issues/2369 + // See https://html.spec.whatwg.org/#nonce-attributes + // The `node.getAttribute` check was added for the sake of + // `jQuery.globalEval` so that it can fake a nonce-containing node + // via an object. 
+ val = node[ i ] || node.getAttribute && node.getAttribute( i ); + if ( val ) { + script.setAttribute( i, val ); + } + } + } + doc.head.appendChild( script ).parentNode.removeChild( script ); + } + + +function toType( obj ) { + if ( obj == null ) { + return obj + ""; + } + + // Support: Android <=2.3 only (functionish RegExp) + return typeof obj === "object" || typeof obj === "function" ? + class2type[ toString.call( obj ) ] || "object" : + typeof obj; +} +/* global Symbol */ +// Defining this global in .eslintrc.json would create a danger of using the global +// unguarded in another place, it seems safer to define global only for this module + + + +var + version = "3.4.1", + + // Define a local copy of jQuery + jQuery = function( selector, context ) { + + // The jQuery object is actually just the init constructor 'enhanced' + // Need init if jQuery is called (just allow error to be thrown if not included) + return new jQuery.fn.init( selector, context ); + }, + + // Support: Android <=4.0 only + // Make sure we trim BOM and NBSP + rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g; + +jQuery.fn = jQuery.prototype = { + + // The current version of jQuery being used + jquery: version, + + constructor: jQuery, + + // The default length of a jQuery object is 0 + length: 0, + + toArray: function() { + return slice.call( this ); + }, + + // Get the Nth element in the matched element set OR + // Get the whole matched element set as a clean array + get: function( num ) { + + // Return all the elements in a clean array + if ( num == null ) { + return slice.call( this ); + } + + // Return just the one element from the set + return num < 0 ? this[ num + this.length ] : this[ num ]; + }, + + // Take an array of elements and push it onto the stack + // (returning the new matched element set) + pushStack: function( elems ) { + + // Build a new jQuery matched element set + var ret = jQuery.merge( this.constructor(), elems ); + + // Add the old object onto the stack (as a reference) + ret.prevObject = this; + + // Return the newly-formed element set + return ret; + }, + + // Execute a callback for every element in the matched set. + each: function( callback ) { + return jQuery.each( this, callback ); + }, + + map: function( callback ) { + return this.pushStack( jQuery.map( this, function( elem, i ) { + return callback.call( elem, i, elem ); + } ) ); + }, + + slice: function() { + return this.pushStack( slice.apply( this, arguments ) ); + }, + + first: function() { + return this.eq( 0 ); + }, + + last: function() { + return this.eq( -1 ); + }, + + eq: function( i ) { + var len = this.length, + j = +i + ( i < 0 ? len : 0 ); + return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); + }, + + end: function() { + return this.prevObject || this.constructor(); + }, + + // For internal use only. + // Behaves like an Array's method, not like a jQuery method. 
+ push: push, + sort: arr.sort, + splice: arr.splice +}; + +jQuery.extend = jQuery.fn.extend = function() { + var options, name, src, copy, copyIsArray, clone, + target = arguments[ 0 ] || {}, + i = 1, + length = arguments.length, + deep = false; + + // Handle a deep copy situation + if ( typeof target === "boolean" ) { + deep = target; + + // Skip the boolean and the target + target = arguments[ i ] || {}; + i++; + } + + // Handle case when target is a string or something (possible in deep copy) + if ( typeof target !== "object" && !isFunction( target ) ) { + target = {}; + } + + // Extend jQuery itself if only one argument is passed + if ( i === length ) { + target = this; + i--; + } + + for ( ; i < length; i++ ) { + + // Only deal with non-null/undefined values + if ( ( options = arguments[ i ] ) != null ) { + + // Extend the base object + for ( name in options ) { + copy = options[ name ]; + + // Prevent Object.prototype pollution + // Prevent never-ending loop + if ( name === "__proto__" || target === copy ) { + continue; + } + + // Recurse if we're merging plain objects or arrays + if ( deep && copy && ( jQuery.isPlainObject( copy ) || + ( copyIsArray = Array.isArray( copy ) ) ) ) { + src = target[ name ]; + + // Ensure proper type for the source value + if ( copyIsArray && !Array.isArray( src ) ) { + clone = []; + } else if ( !copyIsArray && !jQuery.isPlainObject( src ) ) { + clone = {}; + } else { + clone = src; + } + copyIsArray = false; + + // Never move original objects, clone them + target[ name ] = jQuery.extend( deep, clone, copy ); + + // Don't bring in undefined values + } else if ( copy !== undefined ) { + target[ name ] = copy; + } + } + } + } + + // Return the modified object + return target; +}; + +jQuery.extend( { + + // Unique for each copy of jQuery on the page + expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), + + // Assume jQuery is ready without the ready module + isReady: true, + + error: function( msg ) { + throw new Error( msg ); + }, + + noop: function() {}, + + isPlainObject: function( obj ) { + var proto, Ctor; + + // Detect obvious negatives + // Use toString instead of jQuery.type to catch host objects + if ( !obj || toString.call( obj ) !== "[object Object]" ) { + return false; + } + + proto = getProto( obj ); + + // Objects with no prototype (e.g., `Object.create( null )`) are plain + if ( !proto ) { + return true; + } + + // Objects with prototype are plain iff they were constructed by a global Object function + Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; + return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; + }, + + isEmptyObject: function( obj ) { + var name; + + for ( name in obj ) { + return false; + } + return true; + }, + + // Evaluates a script in a global context + globalEval: function( code, options ) { + DOMEval( code, { nonce: options && options.nonce } ); + }, + + each: function( obj, callback ) { + var length, i = 0; + + if ( isArrayLike( obj ) ) { + length = obj.length; + for ( ; i < length; i++ ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } else { + for ( i in obj ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } + + return obj; + }, + + // Support: Android <=4.0 only + trim: function( text ) { + return text == null ? 
+ "" : + ( text + "" ).replace( rtrim, "" ); + }, + + // results is for internal usage only + makeArray: function( arr, results ) { + var ret = results || []; + + if ( arr != null ) { + if ( isArrayLike( Object( arr ) ) ) { + jQuery.merge( ret, + typeof arr === "string" ? + [ arr ] : arr + ); + } else { + push.call( ret, arr ); + } + } + + return ret; + }, + + inArray: function( elem, arr, i ) { + return arr == null ? -1 : indexOf.call( arr, elem, i ); + }, + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + merge: function( first, second ) { + var len = +second.length, + j = 0, + i = first.length; + + for ( ; j < len; j++ ) { + first[ i++ ] = second[ j ]; + } + + first.length = i; + + return first; + }, + + grep: function( elems, callback, invert ) { + var callbackInverse, + matches = [], + i = 0, + length = elems.length, + callbackExpect = !invert; + + // Go through the array, only saving the items + // that pass the validator function + for ( ; i < length; i++ ) { + callbackInverse = !callback( elems[ i ], i ); + if ( callbackInverse !== callbackExpect ) { + matches.push( elems[ i ] ); + } + } + + return matches; + }, + + // arg is for internal usage only + map: function( elems, callback, arg ) { + var length, value, + i = 0, + ret = []; + + // Go through the array, translating each of the items to their new values + if ( isArrayLike( elems ) ) { + length = elems.length; + for ( ; i < length; i++ ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + + // Go through every key on the object, + } else { + for ( i in elems ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + } + + // Flatten any nested arrays + return concat.apply( [], ret ); + }, + + // A global GUID counter for objects + guid: 1, + + // jQuery.support is not used in Core but other projects attach their + // properties to it so it needs to exist. + support: support +} ); + +if ( typeof Symbol === "function" ) { + jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; +} + +// Populate the class2type map +jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), +function( i, name ) { + class2type[ "[object " + name + "]" ] = name.toLowerCase(); +} ); + +function isArrayLike( obj ) { + + // Support: real iOS 8.2 only (not reproducible in simulator) + // `in` check used to prevent JIT error (gh-2145) + // hasOwn isn't used here due to false negatives + // regarding Nodelist length in IE + var length = !!obj && "length" in obj && obj.length, + type = toType( obj ); + + if ( isFunction( obj ) || isWindow( obj ) ) { + return false; + } + + return type === "array" || length === 0 || + typeof length === "number" && length > 0 && ( length - 1 ) in obj; +} +var Sizzle = +/*! 
+ * Sizzle CSS Selector Engine v2.3.4 + * https://sizzlejs.com/ + * + * Copyright JS Foundation and other contributors + * Released under the MIT license + * https://js.foundation/ + * + * Date: 2019-04-08 + */ +(function( window ) { + +var i, + support, + Expr, + getText, + isXML, + tokenize, + compile, + select, + outermostContext, + sortInput, + hasDuplicate, + + // Local document vars + setDocument, + document, + docElem, + documentIsHTML, + rbuggyQSA, + rbuggyMatches, + matches, + contains, + + // Instance-specific data + expando = "sizzle" + 1 * new Date(), + preferredDoc = window.document, + dirruns = 0, + done = 0, + classCache = createCache(), + tokenCache = createCache(), + compilerCache = createCache(), + nonnativeSelectorCache = createCache(), + sortOrder = function( a, b ) { + if ( a === b ) { + hasDuplicate = true; + } + return 0; + }, + + // Instance methods + hasOwn = ({}).hasOwnProperty, + arr = [], + pop = arr.pop, + push_native = arr.push, + push = arr.push, + slice = arr.slice, + // Use a stripped-down indexOf as it's faster than native + // https://jsperf.com/thor-indexof-vs-for/5 + indexOf = function( list, elem ) { + var i = 0, + len = list.length; + for ( ; i < len; i++ ) { + if ( list[i] === elem ) { + return i; + } + } + return -1; + }, + + booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped", + + // Regular expressions + + // http://www.w3.org/TR/css3-selectors/#whitespace + whitespace = "[\\x20\\t\\r\\n\\f]", + + // http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier + identifier = "(?:\\\\.|[\\w-]|[^\0-\\xa0])+", + + // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors + attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + + // Operator (capture 2) + "*([*^$|!~]?=)" + whitespace + + // "Attribute values must be CSS identifiers [capture 5] or strings [capture 3 or capture 4]" + "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + whitespace + + "*\\]", + + pseudos = ":(" + identifier + ")(?:\\((" + + // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: + // 1. quoted (capture 3; capture 4 or capture 5) + "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + + // 2. simple (capture 6) + "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + + // 3. 
anything else (capture 2) + ".*" + + ")\\)|)", + + // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter + rwhitespace = new RegExp( whitespace + "+", "g" ), + rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ), + + rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), + rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + "*" ), + rdescend = new RegExp( whitespace + "|>" ), + + rpseudo = new RegExp( pseudos ), + ridentifier = new RegExp( "^" + identifier + "$" ), + + matchExpr = { + "ID": new RegExp( "^#(" + identifier + ")" ), + "CLASS": new RegExp( "^\\.(" + identifier + ")" ), + "TAG": new RegExp( "^(" + identifier + "|[*])" ), + "ATTR": new RegExp( "^" + attributes ), + "PSEUDO": new RegExp( "^" + pseudos ), + "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + whitespace + + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace + + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), + "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), + // For use in libraries implementing .is() + // We use this for POS matching in `select` + "needsContext": new RegExp( "^" + whitespace + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + + whitespace + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) + }, + + rhtml = /HTML$/i, + rinputs = /^(?:input|select|textarea|button)$/i, + rheader = /^h\d$/i, + + rnative = /^[^{]+\{\s*\[native \w/, + + // Easily-parseable/retrievable ID or TAG or CLASS selectors + rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, + + rsibling = /[+~]/, + + // CSS escapes + // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters + runescape = new RegExp( "\\\\([\\da-f]{1,6}" + whitespace + "?|(" + whitespace + ")|.)", "ig" ), + funescape = function( _, escaped, escapedWhitespace ) { + var high = "0x" + escaped - 0x10000; + // NaN means non-codepoint + // Support: Firefox<24 + // Workaround erroneous numeric interpretation of +"0x" + return high !== high || escapedWhitespace ? + escaped : + high < 0 ? 
+ // BMP codepoint + String.fromCharCode( high + 0x10000 ) : + // Supplemental Plane codepoint (surrogate pair) + String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); + }, + + // CSS string/identifier serialization + // https://drafts.csswg.org/cssom/#common-serializing-idioms + rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, + fcssescape = function( ch, asCodePoint ) { + if ( asCodePoint ) { + + // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER + if ( ch === "\0" ) { + return "\uFFFD"; + } + + // Control characters and (dependent upon position) numbers get escaped as code points + return ch.slice( 0, -1 ) + "\\" + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; + } + + // Other potentially-special ASCII characters get backslash-escaped + return "\\" + ch; + }, + + // Used for iframes + // See setDocument() + // Removing the function wrapper causes a "Permission Denied" + // error in IE + unloadHandler = function() { + setDocument(); + }, + + inDisabledFieldset = addCombinator( + function( elem ) { + return elem.disabled === true && elem.nodeName.toLowerCase() === "fieldset"; + }, + { dir: "parentNode", next: "legend" } + ); + +// Optimize for push.apply( _, NodeList ) +try { + push.apply( + (arr = slice.call( preferredDoc.childNodes )), + preferredDoc.childNodes + ); + // Support: Android<4.0 + // Detect silently failing push.apply + arr[ preferredDoc.childNodes.length ].nodeType; +} catch ( e ) { + push = { apply: arr.length ? + + // Leverage slice if possible + function( target, els ) { + push_native.apply( target, slice.call(els) ); + } : + + // Support: IE<9 + // Otherwise append directly + function( target, els ) { + var j = target.length, + i = 0; + // Can't trust NodeList.length + while ( (target[j++] = els[i++]) ) {} + target.length = j - 1; + } + }; +} + +function Sizzle( selector, context, results, seed ) { + var m, i, elem, nid, match, groups, newSelector, + newContext = context && context.ownerDocument, + + // nodeType defaults to 9, since context defaults to document + nodeType = context ? context.nodeType : 9; + + results = results || []; + + // Return early from calls with invalid selector or context + if ( typeof selector !== "string" || !selector || + nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { + + return results; + } + + // Try to shortcut find operations (as opposed to filters) in HTML documents + if ( !seed ) { + + if ( ( context ? 
context.ownerDocument || context : preferredDoc ) !== document ) { + setDocument( context ); + } + context = context || document; + + if ( documentIsHTML ) { + + // If the selector is sufficiently simple, try using a "get*By*" DOM method + // (excepting DocumentFragment context, where the methods don't exist) + if ( nodeType !== 11 && (match = rquickExpr.exec( selector )) ) { + + // ID selector + if ( (m = match[1]) ) { + + // Document context + if ( nodeType === 9 ) { + if ( (elem = context.getElementById( m )) ) { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( elem.id === m ) { + results.push( elem ); + return results; + } + } else { + return results; + } + + // Element context + } else { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( newContext && (elem = newContext.getElementById( m )) && + contains( context, elem ) && + elem.id === m ) { + + results.push( elem ); + return results; + } + } + + // Type selector + } else if ( match[2] ) { + push.apply( results, context.getElementsByTagName( selector ) ); + return results; + + // Class selector + } else if ( (m = match[3]) && support.getElementsByClassName && + context.getElementsByClassName ) { + + push.apply( results, context.getElementsByClassName( m ) ); + return results; + } + } + + // Take advantage of querySelectorAll + if ( support.qsa && + !nonnativeSelectorCache[ selector + " " ] && + (!rbuggyQSA || !rbuggyQSA.test( selector )) && + + // Support: IE 8 only + // Exclude object elements + (nodeType !== 1 || context.nodeName.toLowerCase() !== "object") ) { + + newSelector = selector; + newContext = context; + + // qSA considers elements outside a scoping root when evaluating child or + // descendant combinators, which is not what we want. + // In such cases, we work around the behavior by prefixing every selector in the + // list with an ID selector referencing the scope context. + // Thanks to Andrew Dupont for this technique. 
+ if ( nodeType === 1 && rdescend.test( selector ) ) { + + // Capture the context ID, setting it first if necessary + if ( (nid = context.getAttribute( "id" )) ) { + nid = nid.replace( rcssescape, fcssescape ); + } else { + context.setAttribute( "id", (nid = expando) ); + } + + // Prefix every selector in the list + groups = tokenize( selector ); + i = groups.length; + while ( i-- ) { + groups[i] = "#" + nid + " " + toSelector( groups[i] ); + } + newSelector = groups.join( "," ); + + // Expand context for sibling selectors + newContext = rsibling.test( selector ) && testContext( context.parentNode ) || + context; + } + + try { + push.apply( results, + newContext.querySelectorAll( newSelector ) + ); + return results; + } catch ( qsaError ) { + nonnativeSelectorCache( selector, true ); + } finally { + if ( nid === expando ) { + context.removeAttribute( "id" ); + } + } + } + } + } + + // All others + return select( selector.replace( rtrim, "$1" ), context, results, seed ); +} + +/** + * Create key-value caches of limited size + * @returns {function(string, object)} Returns the Object data after storing it on itself with + * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) + * deleting the oldest entry + */ +function createCache() { + var keys = []; + + function cache( key, value ) { + // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) + if ( keys.push( key + " " ) > Expr.cacheLength ) { + // Only keep the most recent entries + delete cache[ keys.shift() ]; + } + return (cache[ key + " " ] = value); + } + return cache; +} + +/** + * Mark a function for special use by Sizzle + * @param {Function} fn The function to mark + */ +function markFunction( fn ) { + fn[ expando ] = true; + return fn; +} + +/** + * Support testing using an element + * @param {Function} fn Passed the created element and returns a boolean result + */ +function assert( fn ) { + var el = document.createElement("fieldset"); + + try { + return !!fn( el ); + } catch (e) { + return false; + } finally { + // Remove from its parent by default + if ( el.parentNode ) { + el.parentNode.removeChild( el ); + } + // release memory in IE + el = null; + } +} + +/** + * Adds the same handler for all of the specified attrs + * @param {String} attrs Pipe-separated list of attributes + * @param {Function} handler The method that will be applied + */ +function addHandle( attrs, handler ) { + var arr = attrs.split("|"), + i = arr.length; + + while ( i-- ) { + Expr.attrHandle[ arr[i] ] = handler; + } +} + +/** + * Checks document order of two siblings + * @param {Element} a + * @param {Element} b + * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b + */ +function siblingCheck( a, b ) { + var cur = b && a, + diff = cur && a.nodeType === 1 && b.nodeType === 1 && + a.sourceIndex - b.sourceIndex; + + // Use IE sourceIndex if available on both nodes + if ( diff ) { + return diff; + } + + // Check if b follows a + if ( cur ) { + while ( (cur = cur.nextSibling) ) { + if ( cur === b ) { + return -1; + } + } + } + + return a ? 
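+ // Illustrative use of createCache() above (it backs classCache, tokenCache
+ // and compilerCache elsewhere in this file):
+ //   var cache = createCache();
+ //   cache( "div", data );      // keyed as "div " to dodge prototype names
+ //   cache[ "div " ] === data;  // true until Expr.cacheLength evicts it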
1 : -1;
+}
+
+/**
+ * Returns a function to use in pseudos for input types
+ * @param {String} type
+ */
+function createInputPseudo( type ) {
+ return function( elem ) {
+ var name = elem.nodeName.toLowerCase();
+ return name === "input" && elem.type === type;
+ };
+}
+
+/**
+ * Returns a function to use in pseudos for buttons
+ * @param {String} type
+ */
+function createButtonPseudo( type ) {
+ return function( elem ) {
+ var name = elem.nodeName.toLowerCase();
+ return (name === "input" || name === "button") && elem.type === type;
+ };
+}
+
+/**
+ * Returns a function to use in pseudos for :enabled/:disabled
+ * @param {Boolean} disabled true for :disabled; false for :enabled
+ */
+function createDisabledPseudo( disabled ) {
+
+ // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2)
+ return function( elem ) {
+
+ // Only certain elements can match :enabled or :disabled
+ // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled
+ // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled
+ if ( "form" in elem ) {
+
+ // Check for inherited disabledness on relevant non-disabled elements:
+ // * listed form-associated elements in a disabled fieldset
+ // https://html.spec.whatwg.org/multipage/forms.html#category-listed
+ // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled
+ // * option elements in a disabled optgroup
+ // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled
+ // All such elements have a "form" property.
+ if ( elem.parentNode && elem.disabled === false ) {
+
+ // Option elements defer to a parent optgroup if present
+ if ( "label" in elem ) {
+ if ( "label" in elem.parentNode ) {
+ return elem.parentNode.disabled === disabled;
+ } else {
+ return elem.disabled === disabled;
+ }
+ }
+
+ // Support: IE 6 - 11
+ // Use the isDisabled shortcut property to check for disabled fieldset ancestors
+ return elem.isDisabled === disabled ||
+
+ // Where there is no isDisabled, check manually
+ /* jshint -W018 */
+ elem.isDisabled !== !disabled &&
+ inDisabledFieldset( elem ) === disabled;
+ }
+
+ return elem.disabled === disabled;
+
+ // Try to winnow out elements that can't be disabled before trusting the disabled property.
+ // Some victims get caught in our net (label, legend, menu, track), but it shouldn't
+ // even exist on them, let alone have a boolean value.
+ } else if ( "label" in elem ) { + return elem.disabled === disabled; + } + + // Remaining elements are neither :enabled nor :disabled + return false; + }; +} + +/** + * Returns a function to use in pseudos for positionals + * @param {Function} fn + */ +function createPositionalPseudo( fn ) { + return markFunction(function( argument ) { + argument = +argument; + return markFunction(function( seed, matches ) { + var j, + matchIndexes = fn( [], seed.length, argument ), + i = matchIndexes.length; + + // Match elements found at the specified indexes + while ( i-- ) { + if ( seed[ (j = matchIndexes[i]) ] ) { + seed[j] = !(matches[j] = seed[j]); + } + } + }); + }); +} + +/** + * Checks a node for validity as a Sizzle context + * @param {Element|Object=} context + * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value + */ +function testContext( context ) { + return context && typeof context.getElementsByTagName !== "undefined" && context; +} + +// Expose support vars for convenience +support = Sizzle.support = {}; + +/** + * Detects XML nodes + * @param {Element|Object} elem An element or a document + * @returns {Boolean} True iff elem is a non-HTML XML node + */ +isXML = Sizzle.isXML = function( elem ) { + var namespace = elem.namespaceURI, + docElem = (elem.ownerDocument || elem).documentElement; + + // Support: IE <=8 + // Assume HTML when documentElement doesn't yet exist, such as inside loading iframes + // https://bugs.jquery.com/ticket/4833 + return !rhtml.test( namespace || docElem && docElem.nodeName || "HTML" ); +}; + +/** + * Sets document-related variables once based on the current document + * @param {Element|Object} [doc] An element or document object to use to set the document + * @returns {Object} Returns the current document + */ +setDocument = Sizzle.setDocument = function( node ) { + var hasCompare, subWindow, + doc = node ? 
node.ownerDocument || node : preferredDoc; + + // Return early if doc is invalid or already selected + if ( doc === document || doc.nodeType !== 9 || !doc.documentElement ) { + return document; + } + + // Update global variables + document = doc; + docElem = document.documentElement; + documentIsHTML = !isXML( document ); + + // Support: IE 9-11, Edge + // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) + if ( preferredDoc !== document && + (subWindow = document.defaultView) && subWindow.top !== subWindow ) { + + // Support: IE 11, Edge + if ( subWindow.addEventListener ) { + subWindow.addEventListener( "unload", unloadHandler, false ); + + // Support: IE 9 - 10 only + } else if ( subWindow.attachEvent ) { + subWindow.attachEvent( "onunload", unloadHandler ); + } + } + + /* Attributes + ---------------------------------------------------------------------- */ + + // Support: IE<8 + // Verify that getAttribute really returns attributes and not properties + // (excepting IE8 booleans) + support.attributes = assert(function( el ) { + el.className = "i"; + return !el.getAttribute("className"); + }); + + /* getElement(s)By* + ---------------------------------------------------------------------- */ + + // Check if getElementsByTagName("*") returns only elements + support.getElementsByTagName = assert(function( el ) { + el.appendChild( document.createComment("") ); + return !el.getElementsByTagName("*").length; + }); + + // Support: IE<9 + support.getElementsByClassName = rnative.test( document.getElementsByClassName ); + + // Support: IE<10 + // Check if getElementById returns elements by name + // The broken getElementById methods don't pick up programmatically-set names, + // so use a roundabout getElementsByName test + support.getById = assert(function( el ) { + docElem.appendChild( el ).id = expando; + return !document.getElementsByName || !document.getElementsByName( expando ).length; + }); + + // ID filter and find + if ( support.getById ) { + Expr.filter["ID"] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + return elem.getAttribute("id") === attrId; + }; + }; + Expr.find["ID"] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var elem = context.getElementById( id ); + return elem ? [ elem ] : []; + } + }; + } else { + Expr.filter["ID"] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + var node = typeof elem.getAttributeNode !== "undefined" && + elem.getAttributeNode("id"); + return node && node.value === attrId; + }; + }; + + // Support: IE 6 - 7 only + // getElementById is not reliable as a find shortcut + Expr.find["ID"] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var node, i, elems, + elem = context.getElementById( id ); + + if ( elem ) { + + // Verify the id attribute + node = elem.getAttributeNode("id"); + if ( node && node.value === id ) { + return [ elem ]; + } + + // Fall back on getElementsByName + elems = context.getElementsByName( id ); + i = 0; + while ( (elem = elems[i++]) ) { + node = elem.getAttributeNode("id"); + if ( node && node.value === id ) { + return [ elem ]; + } + } + } + + return []; + } + }; + } + + // Tag + Expr.find["TAG"] = support.getElementsByTagName ? 
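+ // Illustrative: with modern getElementById semantics,
+ //   Expr.find[ "ID" ]( "main", document )  // hypothetical id "main"
+ // returns [ element ] or []; the fallback branch above re-verifies the id
+ // attribute because IE 6/7 also matched elements with name="main".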
+ function( tag, context ) {
+ if ( typeof context.getElementsByTagName !== "undefined" ) {
+ return context.getElementsByTagName( tag );
+
+ // DocumentFragment nodes don't have gEBTN
+ } else if ( support.qsa ) {
+ return context.querySelectorAll( tag );
+ }
+ } :
+
+ function( tag, context ) {
+ var elem,
+ tmp = [],
+ i = 0,
+ // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too
+ results = context.getElementsByTagName( tag );
+
+ // Filter out possible comments
+ if ( tag === "*" ) {
+ while ( (elem = results[i++]) ) {
+ if ( elem.nodeType === 1 ) {
+ tmp.push( elem );
+ }
+ }
+
+ return tmp;
+ }
+ return results;
+ };
+
+ // Class
+ Expr.find["CLASS"] = support.getElementsByClassName && function( className, context ) {
+ if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) {
+ return context.getElementsByClassName( className );
+ }
+ };
+
+ /* QSA/matchesSelector
+ ---------------------------------------------------------------------- */
+
+ // QSA and matchesSelector support
+
+ // matchesSelector(:active) reports false when true (IE9/Opera 11.5)
+ rbuggyMatches = [];
+
+ // qSa(:focus) reports false when true (Chrome 21)
+ // We allow this because of a bug in IE8/9 that throws an error
+ // whenever `document.activeElement` is accessed on an iframe
+ // So, we allow :focus to pass through QSA all the time to avoid the IE error
+ // See https://bugs.jquery.com/ticket/13378
+ rbuggyQSA = [];
+
+ if ( (support.qsa = rnative.test( document.querySelectorAll )) ) {
+ // Build QSA regex
+ // Regex strategy adopted from Diego Perini
+ assert(function( el ) {
+ // Select is set to empty string on purpose
+ // This is to test IE's treatment of not explicitly
+ // setting a boolean content attribute,
+ // since its presence should be enough
+ // https://bugs.jquery.com/ticket/12359
+ docElem.appendChild( el ).innerHTML = "<a id='" + expando + "'></a>" +
+ "<select id='" + expando + "-\r\\' msallowcapture=''>" +
+ "<option selected=''></option></select>";
+
+ // Support: IE8, Opera 11-12.16
+ // Nothing should be selected when empty strings follow ^= or $= or *=
+ // The test attribute must be unknown in Opera but "safe" for WinRT
+ // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section
+ if ( el.querySelectorAll("[msallowcapture^='']").length ) {
+ rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" );
+ }
+
+ // Support: IE8
+ // Boolean attributes and "value" are not treated correctly
+ if ( !el.querySelectorAll("[selected]").length ) {
+ rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" );
+ }
+
+ // Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+
+ if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) {
+ rbuggyQSA.push("~=");
+ }
+
+ // Webkit/Opera - :checked should return selected option elements
+ // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked
+ // IE8 throws error here and will not see later tests
+ if ( !el.querySelectorAll(":checked").length ) {
+ rbuggyQSA.push(":checked");
+ }
+
+ // Support: Safari 8+, iOS 8+
+ // https://bugs.webkit.org/show_bug.cgi?id=136851
+ // In-page `selector#id sibling-combinator selector` fails
+ if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) {
+ rbuggyQSA.push(".#.+[+~]");
+ }
+ });
+
+ assert(function( el ) {
+ el.innerHTML = "<a href='' disabled='disabled'></a>" +
+ "<select disabled='disabled'><option/></select>";
+
+ // Support: Windows 8 Native Apps
+ // The type and name attributes are restricted during .innerHTML assignment
+ var input = document.createElement("input");
+ input.setAttribute( "type", "hidden" );
+ el.appendChild( input ).setAttribute( "name", "D" );
+
+ // Support: IE8
+
// Enforce case-sensitivity of name attribute + if ( el.querySelectorAll("[name=d]").length ) { + rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" ); + } + + // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) + // IE8 throws error here and will not see later tests + if ( el.querySelectorAll(":enabled").length !== 2 ) { + rbuggyQSA.push( ":enabled", ":disabled" ); + } + + // Support: IE9-11+ + // IE's :disabled selector does not pick up the children of disabled fieldsets + docElem.appendChild( el ).disabled = true; + if ( el.querySelectorAll(":disabled").length !== 2 ) { + rbuggyQSA.push( ":enabled", ":disabled" ); + } + + // Opera 10-11 does not throw on post-comma invalid pseudos + el.querySelectorAll("*,:x"); + rbuggyQSA.push(",.*:"); + }); + } + + if ( (support.matchesSelector = rnative.test( (matches = docElem.matches || + docElem.webkitMatchesSelector || + docElem.mozMatchesSelector || + docElem.oMatchesSelector || + docElem.msMatchesSelector) )) ) { + + assert(function( el ) { + // Check to see if it's possible to do matchesSelector + // on a disconnected node (IE 9) + support.disconnectedMatch = matches.call( el, "*" ); + + // This should fail with an exception + // Gecko does not error, returns false instead + matches.call( el, "[s!='']:x" ); + rbuggyMatches.push( "!=", pseudos ); + }); + } + + rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join("|") ); + rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join("|") ); + + /* Contains + ---------------------------------------------------------------------- */ + hasCompare = rnative.test( docElem.compareDocumentPosition ); + + // Element contains another + // Purposefully self-exclusive + // As in, an element does not contain itself + contains = hasCompare || rnative.test( docElem.contains ) ? + function( a, b ) { + var adown = a.nodeType === 9 ? a.documentElement : a, + bup = b && b.parentNode; + return a === bup || !!( bup && bup.nodeType === 1 && ( + adown.contains ? + adown.contains( bup ) : + a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 + )); + } : + function( a, b ) { + if ( b ) { + while ( (b = b.parentNode) ) { + if ( b === a ) { + return true; + } + } + } + return false; + }; + + /* Sorting + ---------------------------------------------------------------------- */ + + // Document order sorting + sortOrder = hasCompare ? + function( a, b ) { + + // Flag for duplicate removal + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + // Sort on method existence if only one input has compareDocumentPosition + var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; + if ( compare ) { + return compare; + } + + // Calculate position if both inputs belong to the same document + compare = ( a.ownerDocument || a ) === ( b.ownerDocument || b ) ? + a.compareDocumentPosition( b ) : + + // Otherwise we know they are disconnected + 1; + + // Disconnected nodes + if ( compare & 1 || + (!support.sortDetached && b.compareDocumentPosition( a ) === compare) ) { + + // Choose the first element that is related to our preferred document + if ( a === document || a.ownerDocument === preferredDoc && contains(preferredDoc, a) ) { + return -1; + } + if ( b === document || b.ownerDocument === preferredDoc && contains(preferredDoc, b) ) { + return 1; + } + + // Maintain original order + return sortInput ? + ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + } + + return compare & 4 ? 
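+ // compareDocumentPosition bit 4 means "b follows a" (bit 2 the reverse);
+ // e.g. ( document.head.compareDocumentPosition( document.body ) & 4 ) is 4,
+ // so <head> sorts before <body> here: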
-1 : 1; + } : + function( a, b ) { + // Exit early if the nodes are identical + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + var cur, + i = 0, + aup = a.parentNode, + bup = b.parentNode, + ap = [ a ], + bp = [ b ]; + + // Parentless nodes are either documents or disconnected + if ( !aup || !bup ) { + return a === document ? -1 : + b === document ? 1 : + aup ? -1 : + bup ? 1 : + sortInput ? + ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + + // If the nodes are siblings, we can do a quick check + } else if ( aup === bup ) { + return siblingCheck( a, b ); + } + + // Otherwise we need full lists of their ancestors for comparison + cur = a; + while ( (cur = cur.parentNode) ) { + ap.unshift( cur ); + } + cur = b; + while ( (cur = cur.parentNode) ) { + bp.unshift( cur ); + } + + // Walk down the tree looking for a discrepancy + while ( ap[i] === bp[i] ) { + i++; + } + + return i ? + // Do a sibling check if the nodes have a common ancestor + siblingCheck( ap[i], bp[i] ) : + + // Otherwise nodes in our document sort first + ap[i] === preferredDoc ? -1 : + bp[i] === preferredDoc ? 1 : + 0; + }; + + return document; +}; + +Sizzle.matches = function( expr, elements ) { + return Sizzle( expr, null, null, elements ); +}; + +Sizzle.matchesSelector = function( elem, expr ) { + // Set document vars if needed + if ( ( elem.ownerDocument || elem ) !== document ) { + setDocument( elem ); + } + + if ( support.matchesSelector && documentIsHTML && + !nonnativeSelectorCache[ expr + " " ] && + ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && + ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { + + try { + var ret = matches.call( elem, expr ); + + // IE 9's matchesSelector returns false on disconnected nodes + if ( ret || support.disconnectedMatch || + // As well, disconnected nodes are said to be in a document + // fragment in IE 9 + elem.document && elem.document.nodeType !== 11 ) { + return ret; + } + } catch (e) { + nonnativeSelectorCache( expr, true ); + } + } + + return Sizzle( expr, document, null, [ elem ] ).length > 0; +}; + +Sizzle.contains = function( context, elem ) { + // Set document vars if needed + if ( ( context.ownerDocument || context ) !== document ) { + setDocument( context ); + } + return contains( context, elem ); +}; + +Sizzle.attr = function( elem, name ) { + // Set document vars if needed + if ( ( elem.ownerDocument || elem ) !== document ) { + setDocument( elem ); + } + + var fn = Expr.attrHandle[ name.toLowerCase() ], + // Don't get fooled by Object.prototype properties (jQuery #13807) + val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? + fn( elem, name, !documentIsHTML ) : + undefined; + + return val !== undefined ? + val : + support.attributes || !documentIsHTML ? + elem.getAttribute( name ) : + (val = elem.getAttributeNode(name)) && val.specified ? 
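+ // Illustrative: on old IE without reliable getAttribute, an attribute
+ // node's .specified flag distinguishes a real (possibly empty) attribute
+ // from a mere default, so Sizzle.attr( link, "href" ) can still return a
+ // literal value like "#top" (hypothetical) rather than the resolved URL: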
+ val.value : + null; +}; + +Sizzle.escape = function( sel ) { + return (sel + "").replace( rcssescape, fcssescape ); +}; + +Sizzle.error = function( msg ) { + throw new Error( "Syntax error, unrecognized expression: " + msg ); +}; + +/** + * Document sorting and removing duplicates + * @param {ArrayLike} results + */ +Sizzle.uniqueSort = function( results ) { + var elem, + duplicates = [], + j = 0, + i = 0; + + // Unless we *know* we can detect duplicates, assume their presence + hasDuplicate = !support.detectDuplicates; + sortInput = !support.sortStable && results.slice( 0 ); + results.sort( sortOrder ); + + if ( hasDuplicate ) { + while ( (elem = results[i++]) ) { + if ( elem === results[ i ] ) { + j = duplicates.push( i ); + } + } + while ( j-- ) { + results.splice( duplicates[ j ], 1 ); + } + } + + // Clear input after sorting to release objects + // See https://github.com/jquery/sizzle/pull/225 + sortInput = null; + + return results; +}; + +/** + * Utility function for retrieving the text value of an array of DOM nodes + * @param {Array|Element} elem + */ +getText = Sizzle.getText = function( elem ) { + var node, + ret = "", + i = 0, + nodeType = elem.nodeType; + + if ( !nodeType ) { + // If no nodeType, this is expected to be an array + while ( (node = elem[i++]) ) { + // Do not traverse comment nodes + ret += getText( node ); + } + } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { + // Use textContent for elements + // innerText usage removed for consistency of new lines (jQuery #11153) + if ( typeof elem.textContent === "string" ) { + return elem.textContent; + } else { + // Traverse its children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + ret += getText( elem ); + } + } + } else if ( nodeType === 3 || nodeType === 4 ) { + return elem.nodeValue; + } + // Do not include comment or processing instruction nodes + + return ret; +}; + +Expr = Sizzle.selectors = { + + // Can be adjusted by the user + cacheLength: 50, + + createPseudo: markFunction, + + match: matchExpr, + + attrHandle: {}, + + find: {}, + + relative: { + ">": { dir: "parentNode", first: true }, + " ": { dir: "parentNode" }, + "+": { dir: "previousSibling", first: true }, + "~": { dir: "previousSibling" } + }, + + preFilter: { + "ATTR": function( match ) { + match[1] = match[1].replace( runescape, funescape ); + + // Move the given value to match[3] whether quoted or unquoted + match[3] = ( match[3] || match[4] || match[5] || "" ).replace( runescape, funescape ); + + if ( match[2] === "~=" ) { + match[3] = " " + match[3] + " "; + } + + return match.slice( 0, 4 ); + }, + + "CHILD": function( match ) { + /* matches from matchExpr["CHILD"] + 1 type (only|nth|...) + 2 what (child|of-type) + 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) + 4 xn-component of xn+y argument ([+-]?\d*n|) + 5 sign of xn-component + 6 x of xn-component + 7 sign of y-component + 8 y of y-component + */ + match[1] = match[1].toLowerCase(); + + if ( match[1].slice( 0, 3 ) === "nth" ) { + // nth-* requires argument + if ( !match[3] ) { + Sizzle.error( match[0] ); + } + + // numeric x and y parameters for Expr.filter.CHILD + // remember that false/true cast respectively to 0/1 + match[4] = +( match[4] ? 
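+ // e.g. ":nth-child(2n+1)" parses to first = 2, last = 1, and the keyword
+ // forms normalize the same way: "even" becomes 2n (first 2, last 0) and
+ // "odd" becomes 2n+1 (first 2, last 1):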
match[5] + (match[6] || 1) : 2 * ( match[3] === "even" || match[3] === "odd" ) ); + match[5] = +( ( match[7] + match[8] ) || match[3] === "odd" ); + + // other types prohibit arguments + } else if ( match[3] ) { + Sizzle.error( match[0] ); + } + + return match; + }, + + "PSEUDO": function( match ) { + var excess, + unquoted = !match[6] && match[2]; + + if ( matchExpr["CHILD"].test( match[0] ) ) { + return null; + } + + // Accept quoted arguments as-is + if ( match[3] ) { + match[2] = match[4] || match[5] || ""; + + // Strip excess characters from unquoted arguments + } else if ( unquoted && rpseudo.test( unquoted ) && + // Get excess from tokenize (recursively) + (excess = tokenize( unquoted, true )) && + // advance to the next closing parenthesis + (excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length) ) { + + // excess is a negative index + match[0] = match[0].slice( 0, excess ); + match[2] = unquoted.slice( 0, excess ); + } + + // Return only captures needed by the pseudo filter method (type and argument) + return match.slice( 0, 3 ); + } + }, + + filter: { + + "TAG": function( nodeNameSelector ) { + var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); + return nodeNameSelector === "*" ? + function() { return true; } : + function( elem ) { + return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; + }; + }, + + "CLASS": function( className ) { + var pattern = classCache[ className + " " ]; + + return pattern || + (pattern = new RegExp( "(^|" + whitespace + ")" + className + "(" + whitespace + "|$)" )) && + classCache( className, function( elem ) { + return pattern.test( typeof elem.className === "string" && elem.className || typeof elem.getAttribute !== "undefined" && elem.getAttribute("class") || "" ); + }); + }, + + "ATTR": function( name, operator, check ) { + return function( elem ) { + var result = Sizzle.attr( elem, name ); + + if ( result == null ) { + return operator === "!="; + } + if ( !operator ) { + return true; + } + + result += ""; + + return operator === "=" ? result === check : + operator === "!=" ? result !== check : + operator === "^=" ? check && result.indexOf( check ) === 0 : + operator === "*=" ? check && result.indexOf( check ) > -1 : + operator === "$=" ? check && result.slice( -check.length ) === check : + operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : + operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : + false; + }; + }, + + "CHILD": function( type, what, argument, first, last ) { + var simple = type.slice( 0, 3 ) !== "nth", + forward = type.slice( -4 ) !== "last", + ofType = what === "of-type"; + + return first === 1 && last === 0 ? + + // Shortcut for :nth-*(n) + function( elem ) { + return !!elem.parentNode; + } : + + function( elem, context, xml ) { + var cache, uniqueCache, outerCache, node, nodeIndex, start, + dir = simple !== forward ? "nextSibling" : "previousSibling", + parent = elem.parentNode, + name = ofType && elem.nodeName.toLowerCase(), + useCache = !xml && !ofType, + diff = false; + + if ( parent ) { + + // :(first|last|only)-(child|of-type) + if ( simple ) { + while ( dir ) { + node = elem; + while ( (node = node[ dir ]) ) { + if ( ofType ? 
+ node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) { + + return false; + } + } + // Reverse direction for :only-* (if we haven't yet done so) + start = dir = type === "only" && !start && "nextSibling"; + } + return true; + } + + start = [ forward ? parent.firstChild : parent.lastChild ]; + + // non-xml :nth-child(...) stores cache data on `parent` + if ( forward && useCache ) { + + // Seek `elem` from a previously-cached index + + // ...in a gzip-friendly way + node = parent; + outerCache = node[ expando ] || (node[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + (outerCache[ node.uniqueID ] = {}); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; + diff = nodeIndex && cache[ 2 ]; + node = nodeIndex && parent.childNodes[ nodeIndex ]; + + while ( (node = ++nodeIndex && node && node[ dir ] || + + // Fallback to seeking `elem` from the start + (diff = nodeIndex = 0) || start.pop()) ) { + + // When found, cache indexes on `parent` and break + if ( node.nodeType === 1 && ++diff && node === elem ) { + uniqueCache[ type ] = [ dirruns, nodeIndex, diff ]; + break; + } + } + + } else { + // Use previously-cached element index if available + if ( useCache ) { + // ...in a gzip-friendly way + node = elem; + outerCache = node[ expando ] || (node[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + (outerCache[ node.uniqueID ] = {}); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; + diff = nodeIndex; + } + + // xml :nth-child(...) + // or :nth-last-child(...) or :nth(-last)?-of-type(...) + if ( diff === false ) { + // Use the same loop as above to seek `elem` from the start + while ( (node = ++nodeIndex && node && node[ dir ] || + (diff = nodeIndex = 0) || start.pop()) ) { + + if ( ( ofType ? + node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) && + ++diff ) { + + // Cache the index of each encountered element + if ( useCache ) { + outerCache = node[ expando ] || (node[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + (outerCache[ node.uniqueID ] = {}); + + uniqueCache[ type ] = [ dirruns, diff ]; + } + + if ( node === elem ) { + break; + } + } + } + } + } + + // Incorporate the offset, then check against cycle size + diff -= last; + return diff === first || ( diff % first === 0 && diff / first >= 0 ); + } + }; + }, + + "PSEUDO": function( pseudo, argument ) { + // pseudo-class names are case-insensitive + // http://www.w3.org/TR/selectors/#pseudo-classes + // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters + // Remember that setFilters inherits from pseudos + var args, + fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || + Sizzle.error( "unsupported pseudo: " + pseudo ); + + // The user may use createPseudo to indicate that + // arguments are needed to create the filter function + // just as Sizzle does + if ( fn[ expando ] ) { + return fn( argument ); + } + + // But maintain support for old signatures + if ( fn.length > 1 ) { + args = [ pseudo, pseudo, "", argument ]; + return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? 
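+ // Illustrative custom pseudo using the modern createPseudo signature
+ // (a hypothetical ":data" pseudo, not part of this file):
+ //   Expr.pseudos[ "data" ] = Expr.createPseudo(function( key ) {
+ //     return function( elem ) {
+ //       return !!jQuery.data( elem, key );
+ //     };
+ //   });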
+ markFunction(function( seed, matches ) { + var idx, + matched = fn( seed, argument ), + i = matched.length; + while ( i-- ) { + idx = indexOf( seed, matched[i] ); + seed[ idx ] = !( matches[ idx ] = matched[i] ); + } + }) : + function( elem ) { + return fn( elem, 0, args ); + }; + } + + return fn; + } + }, + + pseudos: { + // Potentially complex pseudos + "not": markFunction(function( selector ) { + // Trim the selector passed to compile + // to avoid treating leading and trailing + // spaces as combinators + var input = [], + results = [], + matcher = compile( selector.replace( rtrim, "$1" ) ); + + return matcher[ expando ] ? + markFunction(function( seed, matches, context, xml ) { + var elem, + unmatched = matcher( seed, null, xml, [] ), + i = seed.length; + + // Match elements unmatched by `matcher` + while ( i-- ) { + if ( (elem = unmatched[i]) ) { + seed[i] = !(matches[i] = elem); + } + } + }) : + function( elem, context, xml ) { + input[0] = elem; + matcher( input, null, xml, results ); + // Don't keep the element (issue #299) + input[0] = null; + return !results.pop(); + }; + }), + + "has": markFunction(function( selector ) { + return function( elem ) { + return Sizzle( selector, elem ).length > 0; + }; + }), + + "contains": markFunction(function( text ) { + text = text.replace( runescape, funescape ); + return function( elem ) { + return ( elem.textContent || getText( elem ) ).indexOf( text ) > -1; + }; + }), + + // "Whether an element is represented by a :lang() selector + // is based solely on the element's language value + // being equal to the identifier C, + // or beginning with the identifier C immediately followed by "-". + // The matching of C against the element's language value is performed case-insensitively. + // The identifier C does not have to be a valid language name." + // http://www.w3.org/TR/selectors/#lang-pseudo + "lang": markFunction( function( lang ) { + // lang value must be a valid identifier + if ( !ridentifier.test(lang || "") ) { + Sizzle.error( "unsupported lang: " + lang ); + } + lang = lang.replace( runescape, funescape ).toLowerCase(); + return function( elem ) { + var elemLang; + do { + if ( (elemLang = documentIsHTML ? 
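+ // e.g. an element with lang="en-US" (own or inherited) matches both
+ // ":lang(en-US)" and ":lang(en)", case-insensitively, per the prefix
+ // comparison below: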
+ elem.lang : + elem.getAttribute("xml:lang") || elem.getAttribute("lang")) ) { + + elemLang = elemLang.toLowerCase(); + return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; + } + } while ( (elem = elem.parentNode) && elem.nodeType === 1 ); + return false; + }; + }), + + // Miscellaneous + "target": function( elem ) { + var hash = window.location && window.location.hash; + return hash && hash.slice( 1 ) === elem.id; + }, + + "root": function( elem ) { + return elem === docElem; + }, + + "focus": function( elem ) { + return elem === document.activeElement && (!document.hasFocus || document.hasFocus()) && !!(elem.type || elem.href || ~elem.tabIndex); + }, + + // Boolean properties + "enabled": createDisabledPseudo( false ), + "disabled": createDisabledPseudo( true ), + + "checked": function( elem ) { + // In CSS3, :checked should return both checked and selected elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + var nodeName = elem.nodeName.toLowerCase(); + return (nodeName === "input" && !!elem.checked) || (nodeName === "option" && !!elem.selected); + }, + + "selected": function( elem ) { + // Accessing this property makes selected-by-default + // options in Safari work properly + if ( elem.parentNode ) { + elem.parentNode.selectedIndex; + } + + return elem.selected === true; + }, + + // Contents + "empty": function( elem ) { + // http://www.w3.org/TR/selectors/#empty-pseudo + // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), + // but not by others (comment: 8; processing instruction: 7; etc.) + // nodeType < 6 works because attributes (2) do not appear as children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + if ( elem.nodeType < 6 ) { + return false; + } + } + return true; + }, + + "parent": function( elem ) { + return !Expr.pseudos["empty"]( elem ); + }, + + // Element/input types + "header": function( elem ) { + return rheader.test( elem.nodeName ); + }, + + "input": function( elem ) { + return rinputs.test( elem.nodeName ); + }, + + "button": function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === "button" || name === "button"; + }, + + "text": function( elem ) { + var attr; + return elem.nodeName.toLowerCase() === "input" && + elem.type === "text" && + + // Support: IE<8 + // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" + ( (attr = elem.getAttribute("type")) == null || attr.toLowerCase() === "text" ); + }, + + // Position-in-collection + "first": createPositionalPseudo(function() { + return [ 0 ]; + }), + + "last": createPositionalPseudo(function( matchIndexes, length ) { + return [ length - 1 ]; + }), + + "eq": createPositionalPseudo(function( matchIndexes, length, argument ) { + return [ argument < 0 ? argument + length : argument ]; + }), + + "even": createPositionalPseudo(function( matchIndexes, length ) { + var i = 0; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "odd": createPositionalPseudo(function( matchIndexes, length ) { + var i = 1; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "lt": createPositionalPseudo(function( matchIndexes, length, argument ) { + var i = argument < 0 ? + argument + length : + argument > length ? 
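+ // e.g. ":lt(2)" collects indexes 1 and 0, and a negative argument counts
+ // from the end, so ":gt(-3)" keeps the final two elements: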
+ length : + argument; + for ( ; --i >= 0; ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "gt": createPositionalPseudo(function( matchIndexes, length, argument ) { + var i = argument < 0 ? argument + length : argument; + for ( ; ++i < length; ) { + matchIndexes.push( i ); + } + return matchIndexes; + }) + } +}; + +Expr.pseudos["nth"] = Expr.pseudos["eq"]; + +// Add button/input type pseudos +for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { + Expr.pseudos[ i ] = createInputPseudo( i ); +} +for ( i in { submit: true, reset: true } ) { + Expr.pseudos[ i ] = createButtonPseudo( i ); +} + +// Easy API for creating new setFilters +function setFilters() {} +setFilters.prototype = Expr.filters = Expr.pseudos; +Expr.setFilters = new setFilters(); + +tokenize = Sizzle.tokenize = function( selector, parseOnly ) { + var matched, match, tokens, type, + soFar, groups, preFilters, + cached = tokenCache[ selector + " " ]; + + if ( cached ) { + return parseOnly ? 0 : cached.slice( 0 ); + } + + soFar = selector; + groups = []; + preFilters = Expr.preFilter; + + while ( soFar ) { + + // Comma and first run + if ( !matched || (match = rcomma.exec( soFar )) ) { + if ( match ) { + // Don't consume trailing commas as valid + soFar = soFar.slice( match[0].length ) || soFar; + } + groups.push( (tokens = []) ); + } + + matched = false; + + // Combinators + if ( (match = rcombinators.exec( soFar )) ) { + matched = match.shift(); + tokens.push({ + value: matched, + // Cast descendant combinators to space + type: match[0].replace( rtrim, " " ) + }); + soFar = soFar.slice( matched.length ); + } + + // Filters + for ( type in Expr.filter ) { + if ( (match = matchExpr[ type ].exec( soFar )) && (!preFilters[ type ] || + (match = preFilters[ type ]( match ))) ) { + matched = match.shift(); + tokens.push({ + value: matched, + type: type, + matches: match + }); + soFar = soFar.slice( matched.length ); + } + } + + if ( !matched ) { + break; + } + } + + // Return the length of the invalid excess + // if we're just parsing + // Otherwise, throw an error or return tokens + return parseOnly ? + soFar.length : + soFar ? + Sizzle.error( selector ) : + // Cache the tokens + tokenCache( selector, groups ).slice( 0 ); +}; + +function toSelector( tokens ) { + var i = 0, + len = tokens.length, + selector = ""; + for ( ; i < len; i++ ) { + selector += tokens[i].value; + } + return selector; +} + +function addCombinator( matcher, combinator, base ) { + var dir = combinator.dir, + skip = combinator.next, + key = skip || dir, + checkNonElements = base && key === "parentNode", + doneName = done++; + + return combinator.first ? 
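+ // "first" combinators (">", "+") test only the single nearest candidate,
+ // e.g. "div > p" checks p.parentNode once, while " " and "~" walk the
+ // whole axis using the dirruns/doneName cache below: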
+ // Check against closest ancestor/preceding element + function( elem, context, xml ) { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + return matcher( elem, context, xml ); + } + } + return false; + } : + + // Check against all ancestor/preceding elements + function( elem, context, xml ) { + var oldCache, uniqueCache, outerCache, + newCache = [ dirruns, doneName ]; + + // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching + if ( xml ) { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + if ( matcher( elem, context, xml ) ) { + return true; + } + } + } + } else { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + outerCache = elem[ expando ] || (elem[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ elem.uniqueID ] || (outerCache[ elem.uniqueID ] = {}); + + if ( skip && skip === elem.nodeName.toLowerCase() ) { + elem = elem[ dir ] || elem; + } else if ( (oldCache = uniqueCache[ key ]) && + oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { + + // Assign to newCache so results back-propagate to previous elements + return (newCache[ 2 ] = oldCache[ 2 ]); + } else { + // Reuse newcache so results back-propagate to previous elements + uniqueCache[ key ] = newCache; + + // A match means we're done; a fail means we have to keep checking + if ( (newCache[ 2 ] = matcher( elem, context, xml )) ) { + return true; + } + } + } + } + } + return false; + }; +} + +function elementMatcher( matchers ) { + return matchers.length > 1 ? + function( elem, context, xml ) { + var i = matchers.length; + while ( i-- ) { + if ( !matchers[i]( elem, context, xml ) ) { + return false; + } + } + return true; + } : + matchers[0]; +} + +function multipleContexts( selector, contexts, results ) { + var i = 0, + len = contexts.length; + for ( ; i < len; i++ ) { + Sizzle( selector, contexts[i], results ); + } + return results; +} + +function condense( unmatched, map, filter, context, xml ) { + var elem, + newUnmatched = [], + i = 0, + len = unmatched.length, + mapped = map != null; + + for ( ; i < len; i++ ) { + if ( (elem = unmatched[i]) ) { + if ( !filter || filter( elem, context, xml ) ) { + newUnmatched.push( elem ); + if ( mapped ) { + map.push( i ); + } + } + } + } + + return newUnmatched; +} + +function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { + if ( postFilter && !postFilter[ expando ] ) { + postFilter = setMatcher( postFilter ); + } + if ( postFinder && !postFinder[ expando ] ) { + postFinder = setMatcher( postFinder, postSelector ); + } + return markFunction(function( seed, results, context, xml ) { + var temp, i, elem, + preMap = [], + postMap = [], + preexisting = results.length, + + // Get initial elements from seed or context + elems = seed || multipleContexts( selector || "*", context.nodeType ? [ context ] : context, [] ), + + // Prefilter to get matcher input, preserving a map for seed-results synchronization + matcherIn = preFilter && ( seed || !selector ) ? + condense( elems, preMap, preFilter, context, xml ) : + elems, + + matcherOut = matcher ? + // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, + postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
+ + // ...intermediate processing is necessary + [] : + + // ...otherwise use results directly + results : + matcherIn; + + // Find primary matches + if ( matcher ) { + matcher( matcherIn, matcherOut, context, xml ); + } + + // Apply postFilter + if ( postFilter ) { + temp = condense( matcherOut, postMap ); + postFilter( temp, [], context, xml ); + + // Un-match failing elements by moving them back to matcherIn + i = temp.length; + while ( i-- ) { + if ( (elem = temp[i]) ) { + matcherOut[ postMap[i] ] = !(matcherIn[ postMap[i] ] = elem); + } + } + } + + if ( seed ) { + if ( postFinder || preFilter ) { + if ( postFinder ) { + // Get the final matcherOut by condensing this intermediate into postFinder contexts + temp = []; + i = matcherOut.length; + while ( i-- ) { + if ( (elem = matcherOut[i]) ) { + // Restore matcherIn since elem is not yet a final match + temp.push( (matcherIn[i] = elem) ); + } + } + postFinder( null, (matcherOut = []), temp, xml ); + } + + // Move matched elements from seed to results to keep them synchronized + i = matcherOut.length; + while ( i-- ) { + if ( (elem = matcherOut[i]) && + (temp = postFinder ? indexOf( seed, elem ) : preMap[i]) > -1 ) { + + seed[temp] = !(results[temp] = elem); + } + } + } + + // Add elements to results, through postFinder if defined + } else { + matcherOut = condense( + matcherOut === results ? + matcherOut.splice( preexisting, matcherOut.length ) : + matcherOut + ); + if ( postFinder ) { + postFinder( null, results, matcherOut, xml ); + } else { + push.apply( results, matcherOut ); + } + } + }); +} + +function matcherFromTokens( tokens ) { + var checkContext, matcher, j, + len = tokens.length, + leadingRelative = Expr.relative[ tokens[0].type ], + implicitRelative = leadingRelative || Expr.relative[" "], + i = leadingRelative ? 1 : 0, + + // The foundational matcher ensures that elements are reachable from top-level context(s) + matchContext = addCombinator( function( elem ) { + return elem === checkContext; + }, implicitRelative, true ), + matchAnyContext = addCombinator( function( elem ) { + return indexOf( checkContext, elem ) > -1; + }, implicitRelative, true ), + matchers = [ function( elem, context, xml ) { + var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( + (checkContext = context).nodeType ? + matchContext( elem, context, xml ) : + matchAnyContext( elem, context, xml ) ); + // Avoid hanging onto element (issue #299) + checkContext = null; + return ret; + } ]; + + for ( ; i < len; i++ ) { + if ( (matcher = Expr.relative[ tokens[i].type ]) ) { + matchers = [ addCombinator(elementMatcher( matchers ), matcher) ]; + } else { + matcher = Expr.filter[ tokens[i].type ].apply( null, tokens[i].matches ); + + // Return special upon seeing a positional matcher + if ( matcher[ expando ] ) { + // Find the next relative operator (if any) for proper handling + j = ++i; + for ( ; j < len; j++ ) { + if ( Expr.relative[ tokens[j].type ] ) { + break; + } + } + return setMatcher( + i > 1 && elementMatcher( matchers ), + i > 1 && toSelector( + // If the preceding token was a descendant combinator, insert an implicit any-element `*` + tokens.slice( 0, i - 1 ).concat({ value: tokens[ i - 2 ].type === " " ? 
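+ // e.g. compiling "div p:eq(1)": the tokens before the positional pseudo
+ // are re-serialized as the pre-filter, and a trailing descendant
+ // combinator gains an implicit universal selector, roughly "div *":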
"*" : "" }) + ).replace( rtrim, "$1" ), + matcher, + i < j && matcherFromTokens( tokens.slice( i, j ) ), + j < len && matcherFromTokens( (tokens = tokens.slice( j )) ), + j < len && toSelector( tokens ) + ); + } + matchers.push( matcher ); + } + } + + return elementMatcher( matchers ); +} + +function matcherFromGroupMatchers( elementMatchers, setMatchers ) { + var bySet = setMatchers.length > 0, + byElement = elementMatchers.length > 0, + superMatcher = function( seed, context, xml, results, outermost ) { + var elem, j, matcher, + matchedCount = 0, + i = "0", + unmatched = seed && [], + setMatched = [], + contextBackup = outermostContext, + // We must always have either seed elements or outermost context + elems = seed || byElement && Expr.find["TAG"]( "*", outermost ), + // Use integer dirruns iff this is the outermost matcher + dirrunsUnique = (dirruns += contextBackup == null ? 1 : Math.random() || 0.1), + len = elems.length; + + if ( outermost ) { + outermostContext = context === document || context || outermost; + } + + // Add elements passing elementMatchers directly to results + // Support: IE<9, Safari + // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id + for ( ; i !== len && (elem = elems[i]) != null; i++ ) { + if ( byElement && elem ) { + j = 0; + if ( !context && elem.ownerDocument !== document ) { + setDocument( elem ); + xml = !documentIsHTML; + } + while ( (matcher = elementMatchers[j++]) ) { + if ( matcher( elem, context || document, xml) ) { + results.push( elem ); + break; + } + } + if ( outermost ) { + dirruns = dirrunsUnique; + } + } + + // Track unmatched elements for set filters + if ( bySet ) { + // They will have gone through all possible matchers + if ( (elem = !matcher && elem) ) { + matchedCount--; + } + + // Lengthen the array for every element, matched or not + if ( seed ) { + unmatched.push( elem ); + } + } + } + + // `i` is now the count of elements visited above, and adding it to `matchedCount` + // makes the latter nonnegative. + matchedCount += i; + + // Apply set filters to unmatched elements + // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount` + // equals `i`), unless we didn't visit _any_ elements in the above loop because we have + // no element matchers and no seed. + // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that + // case, which will result in a "00" `matchedCount` that differs from `i` but is also + // numerically zero. + if ( bySet && i !== matchedCount ) { + j = 0; + while ( (matcher = setMatchers[j++]) ) { + matcher( unmatched, setMatched, context, xml ); + } + + if ( seed ) { + // Reintegrate element matches to eliminate the need for sorting + if ( matchedCount > 0 ) { + while ( i-- ) { + if ( !(unmatched[i] || setMatched[i]) ) { + setMatched[i] = pop.call( results ); + } + } + } + + // Discard index placeholder values to get only actual matches + setMatched = condense( setMatched ); + } + + // Add matches to results + push.apply( results, setMatched ); + + // Seedless set matches succeeding multiple successful matchers stipulate sorting + if ( outermost && !seed && setMatched.length > 0 && + ( matchedCount + setMatchers.length ) > 1 ) { + + Sizzle.uniqueSort( results ); + } + } + + // Override manipulation of globals by nested matchers + if ( outermost ) { + dirruns = dirrunsUnique; + outermostContext = contextBackup; + } + + return unmatched; + }; + + return bySet ? 
+ markFunction( superMatcher ) : + superMatcher; +} + +compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { + var i, + setMatchers = [], + elementMatchers = [], + cached = compilerCache[ selector + " " ]; + + if ( !cached ) { + // Generate a function of recursive functions that can be used to check each element + if ( !match ) { + match = tokenize( selector ); + } + i = match.length; + while ( i-- ) { + cached = matcherFromTokens( match[i] ); + if ( cached[ expando ] ) { + setMatchers.push( cached ); + } else { + elementMatchers.push( cached ); + } + } + + // Cache the compiled function + cached = compilerCache( selector, matcherFromGroupMatchers( elementMatchers, setMatchers ) ); + + // Save selector and tokenization + cached.selector = selector; + } + return cached; +}; + +/** + * A low-level selection function that works with Sizzle's compiled + * selector functions + * @param {String|Function} selector A selector or a pre-compiled + * selector function built with Sizzle.compile + * @param {Element} context + * @param {Array} [results] + * @param {Array} [seed] A set of elements to match against + */ +select = Sizzle.select = function( selector, context, results, seed ) { + var i, tokens, token, type, find, + compiled = typeof selector === "function" && selector, + match = !seed && tokenize( (selector = compiled.selector || selector) ); + + results = results || []; + + // Try to minimize operations if there is only one selector in the list and no seed + // (the latter of which guarantees us context) + if ( match.length === 1 ) { + + // Reduce context if the leading compound selector is an ID + tokens = match[0] = match[0].slice( 0 ); + if ( tokens.length > 2 && (token = tokens[0]).type === "ID" && + context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[1].type ] ) { + + context = ( Expr.find["ID"]( token.matches[0].replace(runescape, funescape), context ) || [] )[0]; + if ( !context ) { + return results; + + // Precompiled matchers will still verify ancestry, so step up a level + } else if ( compiled ) { + context = context.parentNode; + } + + selector = selector.slice( tokens.shift().value.length ); + } + + // Fetch a seed set for right-to-left matching + i = matchExpr["needsContext"].test( selector ) ? 
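+ // Illustrative reuse of a precompiled selector (both caches hit):
+ //   var compiled = Sizzle.compile( "div.note em" );
+ //   Sizzle.select( compiled, document, [] );
+ // Selectors flagged by matchExpr[ "needsContext" ] (e.g. ":first" or a
+ // leading "> ") skip the find-based seeding below: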
0 : tokens.length;
+ while ( i-- ) {
+ token = tokens[i];
+
+ // Abort if we hit a combinator
+ if ( Expr.relative[ (type = token.type) ] ) {
+ break;
+ }
+ if ( (find = Expr.find[ type ]) ) {
+ // Search, expanding context for leading sibling combinators
+ if ( (seed = find(
+ token.matches[0].replace( runescape, funescape ),
+ rsibling.test( tokens[0].type ) && testContext( context.parentNode ) || context
+ )) ) {
+
+ // If seed is empty or no tokens remain, we can return early
+ tokens.splice( i, 1 );
+ selector = seed.length && toSelector( tokens );
+ if ( !selector ) {
+ push.apply( results, seed );
+ return results;
+ }
+
+ break;
+ }
+ }
+ }
+ }
+
+ // Compile and execute a filtering function if one is not provided
+ // Provide `match` to avoid retokenization if we modified the selector above
+ ( compiled || compile( selector, match ) )(
+ seed,
+ context,
+ !documentIsHTML,
+ results,
+ !context || rsibling.test( selector ) && testContext( context.parentNode ) || context
+ );
+ return results;
+};
+
+// One-time assignments
+
+// Sort stability
+support.sortStable = expando.split("").sort( sortOrder ).join("") === expando;
+
+// Support: Chrome 14-35+
+// Always assume duplicates if they aren't passed to the comparison function
+support.detectDuplicates = !!hasDuplicate;
+
+// Initialize against the default document
+setDocument();
+
+// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27)
+// Detached nodes confoundingly follow *each other*
+support.sortDetached = assert(function( el ) {
+ // Should return 1, but returns 4 (following)
+ return el.compareDocumentPosition( document.createElement("fieldset") ) & 1;
+});
+
+// Support: IE<8
+// Prevent attribute/property "interpolation"
+// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx
+if ( !assert(function( el ) {
+ el.innerHTML = "<a href='#'></a>";
+ return el.firstChild.getAttribute("href") === "#";
+}) ) {
+ addHandle( "type|href|height|width", function( elem, name, isXML ) {
+ if ( !isXML ) {
+ return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 );
+ }
+ });
+}
+
+// Support: IE<9
+// Use defaultValue in place of getAttribute("value")
+if ( !support.attributes || !assert(function( el ) {
+ el.innerHTML = "<input/>";
+ el.firstChild.setAttribute( "value", "" );
+ return el.firstChild.getAttribute( "value" ) === "";
+}) ) {
+ addHandle( "value", function( elem, name, isXML ) {
+ if ( !isXML && elem.nodeName.toLowerCase() === "input" ) {
+ return elem.defaultValue;
+ }
+ });
+}
+
+// Support: IE<9
+// Use getAttributeNode to fetch booleans when getAttribute lies
+if ( !assert(function( el ) {
+ return el.getAttribute("disabled") == null;
+}) ) {
+ addHandle( booleans, function( elem, name, isXML ) {
+ var val;
+ if ( !isXML ) {
+ return elem[ name ] === true ? name.toLowerCase() :
+ (val = elem.getAttributeNode( name )) && val.specified ?
+ val.value : + null; + } + }); +} + +return Sizzle; + +})( window ); + + + +jQuery.find = Sizzle; +jQuery.expr = Sizzle.selectors; + +// Deprecated +jQuery.expr[ ":" ] = jQuery.expr.pseudos; +jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort; +jQuery.text = Sizzle.getText; +jQuery.isXMLDoc = Sizzle.isXML; +jQuery.contains = Sizzle.contains; +jQuery.escapeSelector = Sizzle.escape; + + + + +var dir = function( elem, dir, until ) { + var matched = [], + truncate = until !== undefined; + + while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) { + if ( elem.nodeType === 1 ) { + if ( truncate && jQuery( elem ).is( until ) ) { + break; + } + matched.push( elem ); + } + } + return matched; +}; + + +var siblings = function( n, elem ) { + var matched = []; + + for ( ; n; n = n.nextSibling ) { + if ( n.nodeType === 1 && n !== elem ) { + matched.push( n ); + } + } + + return matched; +}; + + +var rneedsContext = jQuery.expr.match.needsContext; + + + +function nodeName( elem, name ) { + + return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); + +}; +var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i ); + + + +// Implement the identical functionality for filter and not +function winnow( elements, qualifier, not ) { + if ( isFunction( qualifier ) ) { + return jQuery.grep( elements, function( elem, i ) { + return !!qualifier.call( elem, i, elem ) !== not; + } ); + } + + // Single element + if ( qualifier.nodeType ) { + return jQuery.grep( elements, function( elem ) { + return ( elem === qualifier ) !== not; + } ); + } + + // Arraylike of elements (jQuery, arguments, Array) + if ( typeof qualifier !== "string" ) { + return jQuery.grep( elements, function( elem ) { + return ( indexOf.call( qualifier, elem ) > -1 ) !== not; + } ); + } + + // Filtered directly for both simple and complex selectors + return jQuery.filter( qualifier, elements, not ); +} + +jQuery.filter = function( expr, elems, not ) { + var elem = elems[ 0 ]; + + if ( not ) { + expr = ":not(" + expr + ")"; + } + + if ( elems.length === 1 && elem.nodeType === 1 ) { + return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : []; + } + + return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { + return elem.nodeType === 1; + } ) ); +}; + +jQuery.fn.extend( { + find: function( selector ) { + var i, ret, + len = this.length, + self = this; + + if ( typeof selector !== "string" ) { + return this.pushStack( jQuery( selector ).filter( function() { + for ( i = 0; i < len; i++ ) { + if ( jQuery.contains( self[ i ], this ) ) { + return true; + } + } + } ) ); + } + + ret = this.pushStack( [] ); + + for ( i = 0; i < len; i++ ) { + jQuery.find( selector, self[ i ], ret ); + } + + return len > 1 ? jQuery.uniqueSort( ret ) : ret; + }, + filter: function( selector ) { + return this.pushStack( winnow( this, selector || [], false ) ); + }, + not: function( selector ) { + return this.pushStack( winnow( this, selector || [], true ) ); + }, + is: function( selector ) { + return !!winnow( + this, + + // If this is a positional/relative selector, check membership in the returned set + // so $("p:first").is("p:last") won't return true for a doc with two "p". + typeof selector === "string" && rneedsContext.test( selector ) ? 
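+ // winnow() accepts several qualifier shapes, e.g.:
+ //   jQuery( "li" ).filter( ":even" )                // string selector
+ //   jQuery( "li" ).filter( function( i ) { ... } )  // predicate function
+ //   jQuery( "li" ).not( document.activeElement )    // single DOM node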
+ jQuery( selector ) : + selector || [], + false + ).length; + } +} ); + + +// Initialize a jQuery object + + +// A central reference to the root jQuery(document) +var rootjQuery, + + // A simple way to check for HTML strings + // Prioritize #id over <tag> to avoid XSS via location.hash (#9521) + // Strict HTML recognition (#11290: must start with <) + // Shortcut simple #id case for speed + rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/, + + init = jQuery.fn.init = function( selector, context, root ) { + var match, elem; + + // HANDLE: $(""), $(null), $(undefined), $(false) + if ( !selector ) { + return this; + } + + // Method init() accepts an alternate rootjQuery + // so migrate can support jQuery.sub (gh-2101) + root = root || rootjQuery; + + // Handle HTML strings + if ( typeof selector === "string" ) { + if ( selector[ 0 ] === "<" && + selector[ selector.length - 1 ] === ">" && + selector.length >= 3 ) { + + // Assume that strings that start and end with <> are HTML and skip the regex check + match = [ null, selector, null ]; + + } else { + match = rquickExpr.exec( selector ); + } + + // Match html or make sure no context is specified for #id + if ( match && ( match[ 1 ] || !context ) ) { + + // HANDLE: $(html) -> $(array) + if ( match[ 1 ] ) { + context = context instanceof jQuery ? context[ 0 ] : context; + + // Option to run scripts is true for back-compat + // Intentionally let the error be thrown if parseHTML is not present + jQuery.merge( this, jQuery.parseHTML( + match[ 1 ], + context && context.nodeType ? context.ownerDocument || context : document, + true + ) ); + + // HANDLE: $(html, props) + if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) { + for ( match in context ) { + + // Properties of context are called as methods if possible + if ( isFunction( this[ match ] ) ) { + this[ match ]( context[ match ] ); + + // ...and otherwise set as attributes + } else { + this.attr( match, context[ match ] ); + } + } + } + + return this; + + // HANDLE: $(#id) + } else { + elem = document.getElementById( match[ 2 ] ); + + if ( elem ) { + + // Inject the element directly into the jQuery object + this[ 0 ] = elem; + this.length = 1; + } + return this; + } + + // HANDLE: $(expr, $(...)) + } else if ( !context || context.jquery ) { + return ( context || root ).find( selector ); + + // HANDLE: $(expr, context) + // (which is just equivalent to: $(context).find(expr) + } else { + return this.constructor( context ).find( selector ); + } + + // HANDLE: $(DOMElement) + } else if ( selector.nodeType ) { + this[ 0 ] = selector; + this.length = 1; + return this; + + // HANDLE: $(function) + // Shortcut for document ready + } else if ( isFunction( selector ) ) { + return root.ready !== undefined ?
+ root.ready( selector ) : + + // Execute immediately if ready is not present + selector( jQuery ); + } + + return jQuery.makeArray( selector, this ); + }; + +// Give the init function the jQuery prototype for later instantiation +init.prototype = jQuery.fn; + +// Initialize central reference +rootjQuery = jQuery( document ); + + +var rparentsprev = /^(?:parents|prev(?:Until|All))/, + + // Methods guaranteed to produce a unique set when starting from a unique set + guaranteedUnique = { + children: true, + contents: true, + next: true, + prev: true + }; + +jQuery.fn.extend( { + has: function( target ) { + var targets = jQuery( target, this ), + l = targets.length; + + return this.filter( function() { + var i = 0; + for ( ; i < l; i++ ) { + if ( jQuery.contains( this, targets[ i ] ) ) { + return true; + } + } + } ); + }, + + closest: function( selectors, context ) { + var cur, + i = 0, + l = this.length, + matched = [], + targets = typeof selectors !== "string" && jQuery( selectors ); + + // Positional selectors never match, since there's no _selection_ context + if ( !rneedsContext.test( selectors ) ) { + for ( ; i < l; i++ ) { + for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) { + + // Always skip document fragments + if ( cur.nodeType < 11 && ( targets ? + targets.index( cur ) > -1 : + + // Don't pass non-elements to Sizzle + cur.nodeType === 1 && + jQuery.find.matchesSelector( cur, selectors ) ) ) { + + matched.push( cur ); + break; + } + } + } + } + + return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched ); + }, + + // Determine the position of an element within the set + index: function( elem ) { + + // No argument, return index in parent + if ( !elem ) { + return ( this[ 0 ] && this[ 0 ].parentNode ) ? this.first().prevAll().length : -1; + } + + // Index in selector + if ( typeof elem === "string" ) { + return indexOf.call( jQuery( elem ), this[ 0 ] ); + } + + // Locate the position of the desired element + return indexOf.call( this, + + // If it receives a jQuery object, the first element is used + elem.jquery ? elem[ 0 ] : elem + ); + }, + + add: function( selector, context ) { + return this.pushStack( + jQuery.uniqueSort( + jQuery.merge( this.get(), jQuery( selector, context ) ) + ) + ); + }, + + addBack: function( selector ) { + return this.add( selector == null ? + this.prevObject : this.prevObject.filter( selector ) + ); + } +} ); + +function sibling( cur, dir ) { + while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {} + return cur; +} + +jQuery.each( { + parent: function( elem ) { + var parent = elem.parentNode; + return parent && parent.nodeType !== 11 ? 
parent : null; + }, + parents: function( elem ) { + return dir( elem, "parentNode" ); + }, + parentsUntil: function( elem, i, until ) { + return dir( elem, "parentNode", until ); + }, + next: function( elem ) { + return sibling( elem, "nextSibling" ); + }, + prev: function( elem ) { + return sibling( elem, "previousSibling" ); + }, + nextAll: function( elem ) { + return dir( elem, "nextSibling" ); + }, + prevAll: function( elem ) { + return dir( elem, "previousSibling" ); + }, + nextUntil: function( elem, i, until ) { + return dir( elem, "nextSibling", until ); + }, + prevUntil: function( elem, i, until ) { + return dir( elem, "previousSibling", until ); + }, + siblings: function( elem ) { + return siblings( ( elem.parentNode || {} ).firstChild, elem ); + }, + children: function( elem ) { + return siblings( elem.firstChild ); + }, + contents: function( elem ) { + if ( typeof elem.contentDocument !== "undefined" ) { + return elem.contentDocument; + } + + // Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only + // Treat the template element as a regular one in browsers that + // don't support it. + if ( nodeName( elem, "template" ) ) { + elem = elem.content || elem; + } + + return jQuery.merge( [], elem.childNodes ); + } +}, function( name, fn ) { + jQuery.fn[ name ] = function( until, selector ) { + var matched = jQuery.map( this, fn, until ); + + if ( name.slice( -5 ) !== "Until" ) { + selector = until; + } + + if ( selector && typeof selector === "string" ) { + matched = jQuery.filter( selector, matched ); + } + + if ( this.length > 1 ) { + + // Remove duplicates + if ( !guaranteedUnique[ name ] ) { + jQuery.uniqueSort( matched ); + } + + // Reverse order for parents* and prev-derivatives + if ( rparentsprev.test( name ) ) { + matched.reverse(); + } + } + + return this.pushStack( matched ); + }; +} ); +var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g ); + + + +// Convert String-formatted options into Object-formatted ones +function createOptions( options ) { + var object = {}; + jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) { + object[ flag ] = true; + } ); + return object; +} + +/* + * Create a callback list using the following parameters: + * + * options: an optional list of space-separated options that will change how + * the callback list behaves or a more traditional option object + * + * By default a callback list will act like an event callback list and can be + * "fired" multiple times. + * + * Possible options: + * + * once: will ensure the callback list can only be fired once (like a Deferred) + * + * memory: will keep track of previous values and will call any callback added + * after the list has been fired right away with the latest "memorized" + * values (like a Deferred) + * + * unique: will ensure a callback can only be added once (no duplicate in the list) + * + * stopOnFalse: interrupt callings when a callback returns false + * + */ +jQuery.Callbacks = function( options ) { + + // Convert options from String-formatted to Object-formatted if needed + // (we check in cache first) + options = typeof options === "string" ? 
+ createOptions( options ) : + jQuery.extend( {}, options ); + + var // Flag to know if list is currently firing + firing, + + // Last fire value for non-forgettable lists + memory, + + // Flag to know if list was already fired + fired, + + // Flag to prevent firing + locked, + + // Actual callback list + list = [], + + // Queue of execution data for repeatable lists + queue = [], + + // Index of currently firing callback (modified by add/remove as needed) + firingIndex = -1, + + // Fire callbacks + fire = function() { + + // Enforce single-firing + locked = locked || options.once; + + // Execute callbacks for all pending executions, + // respecting firingIndex overrides and runtime changes + fired = firing = true; + for ( ; queue.length; firingIndex = -1 ) { + memory = queue.shift(); + while ( ++firingIndex < list.length ) { + + // Run callback and check for early termination + if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false && + options.stopOnFalse ) { + + // Jump to end and forget the data so .add doesn't re-fire + firingIndex = list.length; + memory = false; + } + } + } + + // Forget the data if we're done with it + if ( !options.memory ) { + memory = false; + } + + firing = false; + + // Clean up if we're done firing for good + if ( locked ) { + + // Keep an empty list if we have data for future add calls + if ( memory ) { + list = []; + + // Otherwise, this object is spent + } else { + list = ""; + } + } + }, + + // Actual Callbacks object + self = { + + // Add a callback or a collection of callbacks to the list + add: function() { + if ( list ) { + + // If we have memory from a past run, we should fire after adding + if ( memory && !firing ) { + firingIndex = list.length - 1; + queue.push( memory ); + } + + ( function add( args ) { + jQuery.each( args, function( _, arg ) { + if ( isFunction( arg ) ) { + if ( !options.unique || !self.has( arg ) ) { + list.push( arg ); + } + } else if ( arg && arg.length && toType( arg ) !== "string" ) { + + // Inspect recursively + add( arg ); + } + } ); + } )( arguments ); + + if ( memory && !firing ) { + fire(); + } + } + return this; + }, + + // Remove a callback from the list + remove: function() { + jQuery.each( arguments, function( _, arg ) { + var index; + while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { + list.splice( index, 1 ); + + // Handle firing indexes + if ( index <= firingIndex ) { + firingIndex--; + } + } + } ); + return this; + }, + + // Check if a given callback is in the list. + // If no argument is given, return whether or not list has callbacks attached. + has: function( fn ) { + return fn ? + jQuery.inArray( fn, list ) > -1 : + list.length > 0; + }, + + // Remove all callbacks from the list + empty: function() { + if ( list ) { + list = []; + } + return this; + }, + + // Disable .fire and .add + // Abort any current/pending executions + // Clear all callbacks and values + disable: function() { + locked = queue = []; + list = memory = ""; + return this; + }, + disabled: function() { + return !list; + }, + + // Disable .fire + // Also disable .add unless we have memory (since it would have no effect) + // Abort any pending executions + lock: function() { + locked = queue = []; + if ( !memory && !firing ) { + list = memory = ""; + } + return this; + }, + locked: function() { + return !!locked; + }, + + // Call all callbacks with the given context and arguments + fireWith: function( context, args ) { + if ( !locked ) { + args = args || []; + args = [ context, args.slice ? 
args.slice() : args ]; + queue.push( args ); + if ( !firing ) { + fire(); + } + } + return this; + }, + + // Call all the callbacks with the given arguments + fire: function() { + self.fireWith( this, arguments ); + return this; + }, + + // To know if the callbacks have already been called at least once + fired: function() { + return !!fired; + } + }; + + return self; +}; + + +function Identity( v ) { + return v; +} +function Thrower( ex ) { + throw ex; +} + +function adoptValue( value, resolve, reject, noValue ) { + var method; + + try { + + // Check for promise aspect first to privilege synchronous behavior + if ( value && isFunction( ( method = value.promise ) ) ) { + method.call( value ).done( resolve ).fail( reject ); + + // Other thenables + } else if ( value && isFunction( ( method = value.then ) ) ) { + method.call( value, resolve, reject ); + + // Other non-thenables + } else { + + // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer: + // * false: [ value ].slice( 0 ) => resolve( value ) + // * true: [ value ].slice( 1 ) => resolve() + resolve.apply( undefined, [ value ].slice( noValue ) ); + } + + // For Promises/A+, convert exceptions into rejections + // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in + // Deferred#then to conditionally suppress rejection. + } catch ( value ) { + + // Support: Android 4.0 only + // Strict mode functions invoked without .call/.apply get global-object context + reject.apply( undefined, [ value ] ); + } +} + +jQuery.extend( { + + Deferred: function( func ) { + var tuples = [ + + // action, add listener, callbacks, + // ... .then handlers, argument index, [final state] + [ "notify", "progress", jQuery.Callbacks( "memory" ), + jQuery.Callbacks( "memory" ), 2 ], + [ "resolve", "done", jQuery.Callbacks( "once memory" ), + jQuery.Callbacks( "once memory" ), 0, "resolved" ], + [ "reject", "fail", jQuery.Callbacks( "once memory" ), + jQuery.Callbacks( "once memory" ), 1, "rejected" ] + ], + state = "pending", + promise = { + state: function() { + return state; + }, + always: function() { + deferred.done( arguments ).fail( arguments ); + return this; + }, + "catch": function( fn ) { + return promise.then( null, fn ); + }, + + // Keep pipe for back-compat + pipe: function( /* fnDone, fnFail, fnProgress */ ) { + var fns = arguments; + + return jQuery.Deferred( function( newDefer ) { + jQuery.each( tuples, function( i, tuple ) { + + // Map tuples (progress, done, fail) to arguments (done, fail, progress) + var fn = isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; + + // deferred.progress(function() { bind to newDefer or newDefer.notify }) + // deferred.done(function() { bind to newDefer or newDefer.resolve }) + // deferred.fail(function() { bind to newDefer or newDefer.reject }) + deferred[ tuple[ 1 ] ]( function() { + var returned = fn && fn.apply( this, arguments ); + if ( returned && isFunction( returned.promise ) ) { + returned.promise() + .progress( newDefer.notify ) + .done( newDefer.resolve ) + .fail( newDefer.reject ); + } else { + newDefer[ tuple[ 0 ] + "With" ]( + this, + fn ? 
[ returned ] : arguments + ); + } + } ); + } ); + fns = null; + } ).promise(); + }, + then: function( onFulfilled, onRejected, onProgress ) { + var maxDepth = 0; + function resolve( depth, deferred, handler, special ) { + return function() { + var that = this, + args = arguments, + mightThrow = function() { + var returned, then; + + // Support: Promises/A+ section 2.3.3.3.3 + // https://promisesaplus.com/#point-59 + // Ignore double-resolution attempts + if ( depth < maxDepth ) { + return; + } + + returned = handler.apply( that, args ); + + // Support: Promises/A+ section 2.3.1 + // https://promisesaplus.com/#point-48 + if ( returned === deferred.promise() ) { + throw new TypeError( "Thenable self-resolution" ); + } + + // Support: Promises/A+ sections 2.3.3.1, 3.5 + // https://promisesaplus.com/#point-54 + // https://promisesaplus.com/#point-75 + // Retrieve `then` only once + then = returned && + + // Support: Promises/A+ section 2.3.4 + // https://promisesaplus.com/#point-64 + // Only check objects and functions for thenability + ( typeof returned === "object" || + typeof returned === "function" ) && + returned.then; + + // Handle a returned thenable + if ( isFunction( then ) ) { + + // Special processors (notify) just wait for resolution + if ( special ) { + then.call( + returned, + resolve( maxDepth, deferred, Identity, special ), + resolve( maxDepth, deferred, Thrower, special ) + ); + + // Normal processors (resolve) also hook into progress + } else { + + // ...and disregard older resolution values + maxDepth++; + + then.call( + returned, + resolve( maxDepth, deferred, Identity, special ), + resolve( maxDepth, deferred, Thrower, special ), + resolve( maxDepth, deferred, Identity, + deferred.notifyWith ) + ); + } + + // Handle all other returned values + } else { + + // Only substitute handlers pass on context + // and multiple values (non-spec behavior) + if ( handler !== Identity ) { + that = undefined; + args = [ returned ]; + } + + // Process the value(s) + // Default process is resolve + ( special || deferred.resolveWith )( that, args ); + } + }, + + // Only normal processors (resolve) catch and reject exceptions + process = special ? + mightThrow : + function() { + try { + mightThrow(); + } catch ( e ) { + + if ( jQuery.Deferred.exceptionHook ) { + jQuery.Deferred.exceptionHook( e, + process.stackTrace ); + } + + // Support: Promises/A+ section 2.3.3.3.4.1 + // https://promisesaplus.com/#point-61 + // Ignore post-resolution exceptions + if ( depth + 1 >= maxDepth ) { + + // Only substitute handlers pass on context + // and multiple values (non-spec behavior) + if ( handler !== Thrower ) { + that = undefined; + args = [ e ]; + } + + deferred.rejectWith( that, args ); + } + } + }; + + // Support: Promises/A+ section 2.3.3.3.1 + // https://promisesaplus.com/#point-57 + // Re-resolve promises immediately to dodge false rejection from + // subsequent errors + if ( depth ) { + process(); + } else { + + // Call an optional hook to record the stack, in case of exception + // since it's otherwise lost when execution goes async + if ( jQuery.Deferred.getStackHook ) { + process.stackTrace = jQuery.Deferred.getStackHook(); + } + window.setTimeout( process ); + } + }; + } + + return jQuery.Deferred( function( newDefer ) { + + // progress_handlers.add( ... ) + tuples[ 0 ][ 3 ].add( + resolve( + 0, + newDefer, + isFunction( onProgress ) ? + onProgress : + Identity, + newDefer.notifyWith + ) + ); + + // fulfilled_handlers.add( ... 
) + tuples[ 1 ][ 3 ].add( + resolve( + 0, + newDefer, + isFunction( onFulfilled ) ? + onFulfilled : + Identity + ) + ); + + // rejected_handlers.add( ... ) + tuples[ 2 ][ 3 ].add( + resolve( + 0, + newDefer, + isFunction( onRejected ) ? + onRejected : + Thrower + ) + ); + } ).promise(); + }, + + // Get a promise for this deferred + // If obj is provided, the promise aspect is added to the object + promise: function( obj ) { + return obj != null ? jQuery.extend( obj, promise ) : promise; + } + }, + deferred = {}; + + // Add list-specific methods + jQuery.each( tuples, function( i, tuple ) { + var list = tuple[ 2 ], + stateString = tuple[ 5 ]; + + // promise.progress = list.add + // promise.done = list.add + // promise.fail = list.add + promise[ tuple[ 1 ] ] = list.add; + + // Handle state + if ( stateString ) { + list.add( + function() { + + // state = "resolved" (i.e., fulfilled) + // state = "rejected" + state = stateString; + }, + + // rejected_callbacks.disable + // fulfilled_callbacks.disable + tuples[ 3 - i ][ 2 ].disable, + + // rejected_handlers.disable + // fulfilled_handlers.disable + tuples[ 3 - i ][ 3 ].disable, + + // progress_callbacks.lock + tuples[ 0 ][ 2 ].lock, + + // progress_handlers.lock + tuples[ 0 ][ 3 ].lock + ); + } + + // progress_handlers.fire + // fulfilled_handlers.fire + // rejected_handlers.fire + list.add( tuple[ 3 ].fire ); + + // deferred.notify = function() { deferred.notifyWith(...) } + // deferred.resolve = function() { deferred.resolveWith(...) } + // deferred.reject = function() { deferred.rejectWith(...) } + deferred[ tuple[ 0 ] ] = function() { + deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); + return this; + }; + + // deferred.notifyWith = list.fireWith + // deferred.resolveWith = list.fireWith + // deferred.rejectWith = list.fireWith + deferred[ tuple[ 0 ] + "With" ] = list.fireWith; + } ); + + // Make the deferred a promise + promise.promise( deferred ); + + // Call given func if any + if ( func ) { + func.call( deferred, deferred ); + } + + // All done! + return deferred; + }, + + // Deferred helper + when: function( singleValue ) { + var + + // count of uncompleted subordinates + remaining = arguments.length, + + // count of unprocessed arguments + i = remaining, + + // subordinate fulfillment data + resolveContexts = Array( i ), + resolveValues = slice.call( arguments ), + + // the master Deferred + master = jQuery.Deferred(), + + // subordinate callback factory + updateFunc = function( i ) { + return function( value ) { + resolveContexts[ i ] = this; + resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; + if ( !( --remaining ) ) { + master.resolveWith( resolveContexts, resolveValues ); + } + }; + }; + + // Single- and empty arguments are adopted like Promise.resolve + if ( remaining <= 1 ) { + adoptValue( singleValue, master.done( updateFunc( i ) ).resolve, master.reject, + !remaining ); + + // Use .then() to unwrap secondary thenables (cf. gh-3000) + if ( master.state() === "pending" || + isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { + + return master.then(); + } + } + + // Multiple arguments are aggregated like Promise.all array elements + while ( i-- ) { + adoptValue( resolveValues[ i ], updateFunc( i ), master.reject ); + } + + return master.promise(); + } +} ); + + +// These usually indicate a programmer mistake during development, +// warn about them ASAP rather than swallowing them by default. 
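+// For example, a fulfilled handler that evaluates `undefined.foo` throws a
+// TypeError; that name matches rerrorNames below, so jQuery.Deferred.exceptionHook
+// logs it to the console instead of letting it vanish into a silent rejection.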
+var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; + +jQuery.Deferred.exceptionHook = function( error, stack ) { + + // Support: IE 8 - 9 only + // Console exists when dev tools are open, which can happen at any time + if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { + window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); + } +}; + + + + +jQuery.readyException = function( error ) { + window.setTimeout( function() { + throw error; + } ); +}; + + + + +// The deferred used on DOM ready +var readyList = jQuery.Deferred(); + +jQuery.fn.ready = function( fn ) { + + readyList + .then( fn ) + + // Wrap jQuery.readyException in a function so that the lookup + // happens at the time of error handling instead of callback + // registration. + .catch( function( error ) { + jQuery.readyException( error ); + } ); + + return this; +}; + +jQuery.extend( { + + // Is the DOM ready to be used? Set to true once it occurs. + isReady: false, + + // A counter to track how many items to wait for before + // the ready event fires. See #6781 + readyWait: 1, + + // Handle when the DOM is ready + ready: function( wait ) { + + // Abort if there are pending holds or we're already ready + if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { + return; + } + + // Remember that the DOM is ready + jQuery.isReady = true; + + // If a normal DOM Ready event fired, decrement, and wait if need be + if ( wait !== true && --jQuery.readyWait > 0 ) { + return; + } + + // If there are functions bound, to execute + readyList.resolveWith( document, [ jQuery ] ); + } +} ); + +jQuery.ready.then = readyList.then; + +// The ready event handler and self cleanup method +function completed() { + document.removeEventListener( "DOMContentLoaded", completed ); + window.removeEventListener( "load", completed ); + jQuery.ready(); +} + +// Catch cases where $(document).ready() is called +// after the browser event has already occurred. +// Support: IE <=9 - 10 only +// Older IE sometimes signals "interactive" too soon +if ( document.readyState === "complete" || + ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { + + // Handle it asynchronously to allow scripts the opportunity to delay ready + window.setTimeout( jQuery.ready ); + +} else { + + // Use the handy event callback + document.addEventListener( "DOMContentLoaded", completed ); + + // A fallback to window.onload, that will always work + window.addEventListener( "load", completed ); +} + + + + +// Multifunctional method to get and set values of a collection +// The value/s can optionally be executed if it's a function +var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { + var i = 0, + len = elems.length, + bulk = key == null; + + // Sets many values + if ( toType( key ) === "object" ) { + chainable = true; + for ( i in key ) { + access( elems, fn, i, key[ i ], true, emptyGet, raw ); + } + + // Sets one value + } else if ( value !== undefined ) { + chainable = true; + + if ( !isFunction( value ) ) { + raw = true; + } + + if ( bulk ) { + + // Bulk operations run against the entire set + if ( raw ) { + fn.call( elems, value ); + fn = null; + + // ...except when executing function values + } else { + bulk = fn; + fn = function( elem, key, value ) { + return bulk.call( jQuery( elem ), value ); + }; + } + } + + if ( fn ) { + for ( ; i < len; i++ ) { + fn( + elems[ i ], key, raw ? 
+ value : + value.call( elems[ i ], i, fn( elems[ i ], key ) ) + ); + } + } + } + + if ( chainable ) { + return elems; + } + + // Gets + if ( bulk ) { + return fn.call( elems ); + } + + return len ? fn( elems[ 0 ], key ) : emptyGet; +}; + + +// Matches dashed string for camelizing +var rmsPrefix = /^-ms-/, + rdashAlpha = /-([a-z])/g; + +// Used by camelCase as callback to replace() +function fcamelCase( all, letter ) { + return letter.toUpperCase(); +} + +// Convert dashed to camelCase; used by the css and data modules +// Support: IE <=9 - 11, Edge 12 - 15 +// Microsoft forgot to hump their vendor prefix (#9572) +function camelCase( string ) { + return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); +} +var acceptData = function( owner ) { + + // Accepts only: + // - Node + // - Node.ELEMENT_NODE + // - Node.DOCUMENT_NODE + // - Object + // - Any + return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); +}; + + + + +function Data() { + this.expando = jQuery.expando + Data.uid++; +} + +Data.uid = 1; + +Data.prototype = { + + cache: function( owner ) { + + // Check if the owner object already has a cache + var value = owner[ this.expando ]; + + // If not, create one + if ( !value ) { + value = {}; + + // We can accept data for non-element nodes in modern browsers, + // but we should not, see #8335. + // Always return an empty object. + if ( acceptData( owner ) ) { + + // If it is a node unlikely to be stringify-ed or looped over + // use plain assignment + if ( owner.nodeType ) { + owner[ this.expando ] = value; + + // Otherwise secure it in a non-enumerable property + // configurable must be true to allow the property to be + // deleted when data is removed + } else { + Object.defineProperty( owner, this.expando, { + value: value, + configurable: true + } ); + } + } + } + + return value; + }, + set: function( owner, data, value ) { + var prop, + cache = this.cache( owner ); + + // Handle: [ owner, key, value ] args + // Always use camelCase key (gh-2257) + if ( typeof data === "string" ) { + cache[ camelCase( data ) ] = value; + + // Handle: [ owner, { properties } ] args + } else { + + // Copy the properties one-by-one to the cache object + for ( prop in data ) { + cache[ camelCase( prop ) ] = data[ prop ]; + } + } + return cache; + }, + get: function( owner, key ) { + return key === undefined ? + this.cache( owner ) : + + // Always use camelCase key (gh-2257) + owner[ this.expando ] && owner[ this.expando ][ camelCase( key ) ]; + }, + access: function( owner, key, value ) { + + // In cases where either: + // + // 1. No key was specified + // 2. A string key was specified, but no value provided + // + // Take the "read" path and allow the get method to determine + // which value to return, respectively either: + // + // 1. The entire cache object + // 2. The data stored at the key + // + if ( key === undefined || + ( ( key && typeof key === "string" ) && value === undefined ) ) { + + return this.get( owner, key ); + } + + // When the key is not a string, or both a key and value + // are specified, set or extend (existing objects) with either: + // + // 1. An object of properties + // 2. A key and value + // + this.set( owner, key, value ); + + // Since the "set" path can have two possible entry points + // return the expected data based on which path was taken[*] + return value !== undefined ? 
value : key; + }, + remove: function( owner, key ) { + var i, + cache = owner[ this.expando ]; + + if ( cache === undefined ) { + return; + } + + if ( key !== undefined ) { + + // Support array or space separated string of keys + if ( Array.isArray( key ) ) { + + // If key is an array of keys... + // We always set camelCase keys, so remove that. + key = key.map( camelCase ); + } else { + key = camelCase( key ); + + // If a key with the spaces exists, use it. + // Otherwise, create an array by matching non-whitespace + key = key in cache ? + [ key ] : + ( key.match( rnothtmlwhite ) || [] ); + } + + i = key.length; + + while ( i-- ) { + delete cache[ key[ i ] ]; + } + } + + // Remove the expando if there's no more data + if ( key === undefined || jQuery.isEmptyObject( cache ) ) { + + // Support: Chrome <=35 - 45 + // Webkit & Blink performance suffers when deleting properties + // from DOM nodes, so set to undefined instead + // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) + if ( owner.nodeType ) { + owner[ this.expando ] = undefined; + } else { + delete owner[ this.expando ]; + } + } + }, + hasData: function( owner ) { + var cache = owner[ this.expando ]; + return cache !== undefined && !jQuery.isEmptyObject( cache ); + } +}; +var dataPriv = new Data(); + +var dataUser = new Data(); + + + +// Implementation Summary +// +// 1. Enforce API surface and semantic compatibility with 1.9.x branch +// 2. Improve the module's maintainability by reducing the storage +// paths to a single mechanism. +// 3. Use the same single mechanism to support "private" and "user" data. +// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) +// 5. Avoid exposing implementation details on user objects (eg. expando properties) +// 6. Provide a clear path for implementation upgrade to WeakMap in 2014 + +var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, + rmultiDash = /[A-Z]/g; + +function getData( data ) { + if ( data === "true" ) { + return true; + } + + if ( data === "false" ) { + return false; + } + + if ( data === "null" ) { + return null; + } + + // Only convert to a number if it doesn't change the string + if ( data === +data + "" ) { + return +data; + } + + if ( rbrace.test( data ) ) { + return JSON.parse( data ); + } + + return data; +} + +function dataAttr( elem, key, data ) { + var name; + + // If nothing was found internally, try to fetch any + // data from the HTML5 data-* attribute + if ( data === undefined && elem.nodeType === 1 ) { + name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); + data = elem.getAttribute( name ); + + if ( typeof data === "string" ) { + try { + data = getData( data ); + } catch ( e ) {} + + // Make sure we set the data so it isn't changed later + dataUser.set( elem, key, data ); + } else { + data = undefined; + } + } + return data; +} + +jQuery.extend( { + hasData: function( elem ) { + return dataUser.hasData( elem ) || dataPriv.hasData( elem ); + }, + + data: function( elem, name, data ) { + return dataUser.access( elem, name, data ); + }, + + removeData: function( elem, name ) { + dataUser.remove( elem, name ); + }, + + // TODO: Now that all calls to _data and _removeData have been replaced + // with direct calls to dataPriv methods, these can be deprecated. 
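+	// (Unlike the public data()/removeData() above, these read and write the
+	// dataPriv store, so values set here never surface through .data().)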
+ _data: function( elem, name, data ) { + return dataPriv.access( elem, name, data ); + }, + + _removeData: function( elem, name ) { + dataPriv.remove( elem, name ); + } +} ); + +jQuery.fn.extend( { + data: function( key, value ) { + var i, name, data, + elem = this[ 0 ], + attrs = elem && elem.attributes; + + // Gets all values + if ( key === undefined ) { + if ( this.length ) { + data = dataUser.get( elem ); + + if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { + i = attrs.length; + while ( i-- ) { + + // Support: IE 11 only + // The attrs elements can be null (#14894) + if ( attrs[ i ] ) { + name = attrs[ i ].name; + if ( name.indexOf( "data-" ) === 0 ) { + name = camelCase( name.slice( 5 ) ); + dataAttr( elem, name, data[ name ] ); + } + } + } + dataPriv.set( elem, "hasDataAttrs", true ); + } + } + + return data; + } + + // Sets multiple values + if ( typeof key === "object" ) { + return this.each( function() { + dataUser.set( this, key ); + } ); + } + + return access( this, function( value ) { + var data; + + // The calling jQuery object (element matches) is not empty + // (and therefore has an element appears at this[ 0 ]) and the + // `value` parameter was not undefined. An empty jQuery object + // will result in `undefined` for elem = this[ 0 ] which will + // throw an exception if an attempt to read a data cache is made. + if ( elem && value === undefined ) { + + // Attempt to get data from the cache + // The key will always be camelCased in Data + data = dataUser.get( elem, key ); + if ( data !== undefined ) { + return data; + } + + // Attempt to "discover" the data in + // HTML5 custom data-* attrs + data = dataAttr( elem, key ); + if ( data !== undefined ) { + return data; + } + + // We tried really hard, but the data doesn't exist. + return; + } + + // Set the data... 
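+			// (reached when a value argument was given: access() above routed the
+			// call to its setter path, so the value is written to every matched element)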
+ this.each( function() { + + // We always store the camelCased key + dataUser.set( this, key, value ); + } ); + }, null, value, arguments.length > 1, null, true ); + }, + + removeData: function( key ) { + return this.each( function() { + dataUser.remove( this, key ); + } ); + } +} ); + + +jQuery.extend( { + queue: function( elem, type, data ) { + var queue; + + if ( elem ) { + type = ( type || "fx" ) + "queue"; + queue = dataPriv.get( elem, type ); + + // Speed up dequeue by getting out quickly if this is just a lookup + if ( data ) { + if ( !queue || Array.isArray( data ) ) { + queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); + } else { + queue.push( data ); + } + } + return queue || []; + } + }, + + dequeue: function( elem, type ) { + type = type || "fx"; + + var queue = jQuery.queue( elem, type ), + startLength = queue.length, + fn = queue.shift(), + hooks = jQuery._queueHooks( elem, type ), + next = function() { + jQuery.dequeue( elem, type ); + }; + + // If the fx queue is dequeued, always remove the progress sentinel + if ( fn === "inprogress" ) { + fn = queue.shift(); + startLength--; + } + + if ( fn ) { + + // Add a progress sentinel to prevent the fx queue from being + // automatically dequeued + if ( type === "fx" ) { + queue.unshift( "inprogress" ); + } + + // Clear up the last queue stop function + delete hooks.stop; + fn.call( elem, next, hooks ); + } + + if ( !startLength && hooks ) { + hooks.empty.fire(); + } + }, + + // Not public - generate a queueHooks object, or return the current one + _queueHooks: function( elem, type ) { + var key = type + "queueHooks"; + return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { + empty: jQuery.Callbacks( "once memory" ).add( function() { + dataPriv.remove( elem, [ type + "queue", key ] ); + } ) + } ); + } +} ); + +jQuery.fn.extend( { + queue: function( type, data ) { + var setter = 2; + + if ( typeof type !== "string" ) { + data = type; + type = "fx"; + setter--; + } + + if ( arguments.length < setter ) { + return jQuery.queue( this[ 0 ], type ); + } + + return data === undefined ? 
+ this : + this.each( function() { + var queue = jQuery.queue( this, type, data ); + + // Ensure a hooks for this queue + jQuery._queueHooks( this, type ); + + if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { + jQuery.dequeue( this, type ); + } + } ); + }, + dequeue: function( type ) { + return this.each( function() { + jQuery.dequeue( this, type ); + } ); + }, + clearQueue: function( type ) { + return this.queue( type || "fx", [] ); + }, + + // Get a promise resolved when queues of a certain type + // are emptied (fx is the type by default) + promise: function( type, obj ) { + var tmp, + count = 1, + defer = jQuery.Deferred(), + elements = this, + i = this.length, + resolve = function() { + if ( !( --count ) ) { + defer.resolveWith( elements, [ elements ] ); + } + }; + + if ( typeof type !== "string" ) { + obj = type; + type = undefined; + } + type = type || "fx"; + + while ( i-- ) { + tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); + if ( tmp && tmp.empty ) { + count++; + tmp.empty.add( resolve ); + } + } + resolve(); + return defer.promise( obj ); + } +} ); +var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; + +var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); + + +var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; + +var documentElement = document.documentElement; + + + + var isAttached = function( elem ) { + return jQuery.contains( elem.ownerDocument, elem ); + }, + composed = { composed: true }; + + // Support: IE 9 - 11+, Edge 12 - 18+, iOS 10.0 - 10.2 only + // Check attachment across shadow DOM boundaries when possible (gh-3504) + // Support: iOS 10.0-10.2 only + // Early iOS 10 versions support `attachShadow` but not `getRootNode`, + // leading to errors. We need to check for `getRootNode`. + if ( documentElement.getRootNode ) { + isAttached = function( elem ) { + return jQuery.contains( elem.ownerDocument, elem ) || + elem.getRootNode( composed ) === elem.ownerDocument; + }; + } +var isHiddenWithinTree = function( elem, el ) { + + // isHiddenWithinTree might be called from jQuery#filter function; + // in that case, element will be second argument + elem = el || elem; + + // Inline style trumps all + return elem.style.display === "none" || + elem.style.display === "" && + + // Otherwise, check computed style + // Support: Firefox <=43 - 45 + // Disconnected elements can have computed display: none, so first confirm that elem is + // in the document. + isAttached( elem ) && + + jQuery.css( elem, "display" ) === "none"; + }; + +var swap = function( elem, options, callback, args ) { + var ret, name, + old = {}; + + // Remember the old values, and insert the new ones + for ( name in options ) { + old[ name ] = elem.style[ name ]; + elem.style[ name ] = options[ name ]; + } + + ret = callback.apply( elem, args || [] ); + + // Revert the old values + for ( name in options ) { + elem.style[ name ] = old[ name ]; + } + + return ret; +}; + + + + +function adjustCSS( elem, prop, valueParts, tween ) { + var adjusted, scale, + maxIterations = 20, + currentValue = tween ? + function() { + return tween.cur(); + } : + function() { + return jQuery.css( elem, prop, "" ); + }, + initial = currentValue(), + unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? 
"" : "px" ), + + // Starting value computation is required for potential unit mismatches + initialInUnit = elem.nodeType && + ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && + rcssNum.exec( jQuery.css( elem, prop ) ); + + if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { + + // Support: Firefox <=54 + // Halve the iteration target value to prevent interference from CSS upper bounds (gh-2144) + initial = initial / 2; + + // Trust units reported by jQuery.css + unit = unit || initialInUnit[ 3 ]; + + // Iteratively approximate from a nonzero starting point + initialInUnit = +initial || 1; + + while ( maxIterations-- ) { + + // Evaluate and update our best guess (doubling guesses that zero out). + // Finish if the scale equals or crosses 1 (making the old*new product non-positive). + jQuery.style( elem, prop, initialInUnit + unit ); + if ( ( 1 - scale ) * ( 1 - ( scale = currentValue() / initial || 0.5 ) ) <= 0 ) { + maxIterations = 0; + } + initialInUnit = initialInUnit / scale; + + } + + initialInUnit = initialInUnit * 2; + jQuery.style( elem, prop, initialInUnit + unit ); + + // Make sure we update the tween properties later on + valueParts = valueParts || []; + } + + if ( valueParts ) { + initialInUnit = +initialInUnit || +initial || 0; + + // Apply relative offset (+=/-=) if specified + adjusted = valueParts[ 1 ] ? + initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] : + +valueParts[ 2 ]; + if ( tween ) { + tween.unit = unit; + tween.start = initialInUnit; + tween.end = adjusted; + } + } + return adjusted; +} + + +var defaultDisplayMap = {}; + +function getDefaultDisplay( elem ) { + var temp, + doc = elem.ownerDocument, + nodeName = elem.nodeName, + display = defaultDisplayMap[ nodeName ]; + + if ( display ) { + return display; + } + + temp = doc.body.appendChild( doc.createElement( nodeName ) ); + display = jQuery.css( temp, "display" ); + + temp.parentNode.removeChild( temp ); + + if ( display === "none" ) { + display = "block"; + } + defaultDisplayMap[ nodeName ] = display; + + return display; +} + +function showHide( elements, show ) { + var display, elem, + values = [], + index = 0, + length = elements.length; + + // Determine new display value for elements that need to change + for ( ; index < length; index++ ) { + elem = elements[ index ]; + if ( !elem.style ) { + continue; + } + + display = elem.style.display; + if ( show ) { + + // Since we force visibility upon cascade-hidden elements, an immediate (and slow) + // check is required in this first loop unless we have a nonempty display value (either + // inline or about-to-be-restored) + if ( display === "none" ) { + values[ index ] = dataPriv.get( elem, "display" ) || null; + if ( !values[ index ] ) { + elem.style.display = ""; + } + } + if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) { + values[ index ] = getDefaultDisplay( elem ); + } + } else { + if ( display !== "none" ) { + values[ index ] = "none"; + + // Remember what we're overwriting + dataPriv.set( elem, "display", display ); + } + } + } + + // Set the display of the elements in a second loop to avoid constant reflow + for ( index = 0; index < length; index++ ) { + if ( values[ index ] != null ) { + elements[ index ].style.display = values[ index ]; + } + } + + return elements; +} + +jQuery.fn.extend( { + show: function() { + return showHide( this, true ); + }, + hide: function() { + return showHide( this ); + }, + toggle: function( state ) { + if ( typeof state === "boolean" ) { + return state ? 
this.show() : this.hide(); + } + + return this.each( function() { + if ( isHiddenWithinTree( this ) ) { + jQuery( this ).show(); + } else { + jQuery( this ).hide(); + } + } ); + } +} ); +var rcheckableType = ( /^(?:checkbox|radio)$/i ); + +var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]*)/i ); + +var rscriptType = ( /^$|^module$|\/(?:java|ecma)script/i ); + + + +// We have to close these tags to support XHTML (#13200) +var wrapMap = { + + // Support: IE <=9 only + option: [ 1, "<select multiple='multiple'>", "</select>" ], + + // XHTML parsers do not magically insert elements in the + // same way that tag soup parsers do. So we cannot shorten + // this by omitting <tbody> or other required elements. + thead: [ 1, "<table>", "</table>" ],
+ col: [ 2, "<table><colgroup>", "</colgroup></table>" ],
+ tr: [ 2, "<table><tbody>", "</tbody></table>" ],
+ td: [ 3, "<table><tbody><tr>", "</tr></tbody></table>" ],
+ + _default: [ 0, "", "" ] +}; + +// Support: IE <=9 only +wrapMap.optgroup = wrapMap.option; + +wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; +wrapMap.th = wrapMap.td; + + +function getAll( context, tag ) { + + // Support: IE <=9 - 11 only + // Use typeof to avoid zero-argument method invocation on host objects (#15151) + var ret; + + if ( typeof context.getElementsByTagName !== "undefined" ) { + ret = context.getElementsByTagName( tag || "*" ); + + } else if ( typeof context.querySelectorAll !== "undefined" ) { + ret = context.querySelectorAll( tag || "*" ); + + } else { + ret = []; + } + + if ( tag === undefined || tag && nodeName( context, tag ) ) { + return jQuery.merge( [ context ], ret ); + } + + return ret; +} + + +// Mark scripts as having already been evaluated +function setGlobalEval( elems, refElements ) { + var i = 0, + l = elems.length; + + for ( ; i < l; i++ ) { + dataPriv.set( + elems[ i ], + "globalEval", + !refElements || dataPriv.get( refElements[ i ], "globalEval" ) + ); + } +} + + +var rhtml = /<|&#?\w+;/; + +function buildFragment( elems, context, scripts, selection, ignored ) { + var elem, tmp, tag, wrap, attached, j, + fragment = context.createDocumentFragment(), + nodes = [], + i = 0, + l = elems.length; + + for ( ; i < l; i++ ) { + elem = elems[ i ]; + + if ( elem || elem === 0 ) { + + // Add nodes directly + if ( toType( elem ) === "object" ) { + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem ); + + // Convert non-html into a text node + } else if ( !rhtml.test( elem ) ) { + nodes.push( context.createTextNode( elem ) ); + + // Convert html into DOM nodes + } else { + tmp = tmp || fragment.appendChild( context.createElement( "div" ) ); + + // Deserialize a standard representation + tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase(); + wrap = wrapMap[ tag ] || wrapMap._default; + tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ]; + + // Descend through wrappers to the right content + j = wrap[ 0 ]; + while ( j-- ) { + tmp = tmp.lastChild; + } + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( nodes, tmp.childNodes ); + + // Remember the top-level container + tmp = fragment.firstChild; + + // Ensure the created nodes are orphaned (#12392) + tmp.textContent = ""; + } + } + } + + // Remove wrapper from fragment + fragment.textContent = ""; + + i = 0; + while ( ( elem = nodes[ i++ ] ) ) { + + // Skip elements already in the context collection (trac-4087) + if ( selection && jQuery.inArray( elem, selection ) > -1 ) { + if ( ignored ) { + ignored.push( elem ); + } + continue; + } + + attached = isAttached( elem ); + + // Append to fragment + tmp = getAll( fragment.appendChild( elem ), "script" ); + + // Preserve script evaluation history + if ( attached ) { + setGlobalEval( tmp ); + } + + // Capture executables + if ( scripts ) { + j = 0; + while ( ( elem = tmp[ j++ ] ) ) { + if ( rscriptType.test( elem.type || "" ) ) { + scripts.push( elem ); + } + } + } + } + + return fragment; +} + + +( function() { + var fragment = document.createDocumentFragment(), + div = fragment.appendChild( document.createElement( "div" ) ), + input = document.createElement( "input" ); + + // Support: Android 4.0 - 4.3 only + // Check state lost if the name is set (#11217) + // Support: Windows Web Apps (WWA) + // `name` and `type` must
use .setAttribute for WWA (#14901) + input.setAttribute( "type", "radio" ); + input.setAttribute( "checked", "checked" ); + input.setAttribute( "name", "t" ); + + div.appendChild( input ); + + // Support: Android <=4.1 only + // Older WebKit doesn't clone checked state correctly in fragments + support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked; + + // Support: IE <=11 only + // Make sure textarea (and checkbox) defaultValue is properly cloned + div.innerHTML = "<textarea>x</textarea>"; + support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue; +} )(); + + +var + rkeyEvent = /^key/, + rmouseEvent = /^(?:mouse|pointer|contextmenu|drag|drop)|click/, + rtypenamespace = /^([^.]*)(?:\.(.+)|)/; + +function returnTrue() { + return true; +} + +function returnFalse() { + return false; +} + +// Support: IE <=9 - 11+ +// focus() and blur() are asynchronous, except when they are no-op. +// So expect focus to be synchronous when the element is already active, +// and blur to be synchronous when the element is not already active. +// (focus and blur are always synchronous in other supported browsers, +// this just defines when we can count on it). +function expectSync( elem, type ) { + return ( elem === safeActiveElement() ) === ( type === "focus" ); +} + +// Support: IE <=9 only +// Accessing document.activeElement can throw unexpectedly +// https://bugs.jquery.com/ticket/13393 +function safeActiveElement() { + try { + return document.activeElement; + } catch ( err ) { } +} + +function on( elem, types, selector, data, fn, one ) { + var origFn, type; + + // Types can be a map of types/handlers + if ( typeof types === "object" ) { + + // ( types-Object, selector, data ) + if ( typeof selector !== "string" ) { + + // ( types-Object, data ) + data = data || selector; + selector = undefined; + } + for ( type in types ) { + on( elem, type, selector, data, types[ type ], one ); + } + return elem; + } + + if ( data == null && fn == null ) { + + // ( types, fn ) + fn = selector; + data = selector = undefined; + } else if ( fn == null ) { + if ( typeof selector === "string" ) { + + // ( types, selector, fn ) + fn = data; + data = undefined; + } else { + + // ( types, data, fn ) + fn = data; + data = selector; + selector = undefined; + } + } + if ( fn === false ) { + fn = returnFalse; + } else if ( !fn ) { + return elem; + } + + if ( one === 1 ) { + origFn = fn; + fn = function( event ) { + + // Can use an empty set, since event contains the info + jQuery().off( event ); + return origFn.apply( this, arguments ); + }; + + // Use same guid so caller can remove using origFn + fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); + } + return elem.each( function() { + jQuery.event.add( this, types, fn, data, selector ); + } ); +} + +/* + * Helper functions for managing events -- not part of the public interface. + * Props to Dean Edwards' addEvent library for many of the ideas.
+ */ +jQuery.event = { + + global: {}, + + add: function( elem, types, handler, data, selector ) { + + var handleObjIn, eventHandle, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.get( elem ); + + // Don't attach events to noData or text/comment nodes (but allow plain objects) + if ( !elemData ) { + return; + } + + // Caller can pass in an object of custom data in lieu of the handler + if ( handler.handler ) { + handleObjIn = handler; + handler = handleObjIn.handler; + selector = handleObjIn.selector; + } + + // Ensure that invalid selectors throw exceptions at attach time + // Evaluate against documentElement in case elem is a non-element node (e.g., document) + if ( selector ) { + jQuery.find.matchesSelector( documentElement, selector ); + } + + // Make sure that the handler has a unique ID, used to find/remove it later + if ( !handler.guid ) { + handler.guid = jQuery.guid++; + } + + // Init the element's event structure and main handler, if this is the first + if ( !( events = elemData.events ) ) { + events = elemData.events = {}; + } + if ( !( eventHandle = elemData.handle ) ) { + eventHandle = elemData.handle = function( e ) { + + // Discard the second event of a jQuery.event.trigger() and + // when an event is called after a page has unloaded + return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? + jQuery.event.dispatch.apply( elem, arguments ) : undefined; + }; + } + + // Handle multiple events separated by a space + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); + + // There *must* be a type, no attaching namespace-only handlers + if ( !type ) { + continue; + } + + // If event changes its type, use the special event handlers for the changed type + special = jQuery.event.special[ type ] || {}; + + // If selector defined, determine special event api type, otherwise given type + type = ( selector ? special.delegateType : special.bindType ) || type; + + // Update special based on newly reset type + special = jQuery.event.special[ type ] || {}; + + // handleObj is passed to all event handlers + handleObj = jQuery.extend( { + type: type, + origType: origType, + data: data, + handler: handler, + guid: handler.guid, + selector: selector, + needsContext: selector && jQuery.expr.match.needsContext.test( selector ), + namespace: namespaces.join( "." 
) + }, handleObjIn ); + + // Init the event handler queue if we're the first + if ( !( handlers = events[ type ] ) ) { + handlers = events[ type ] = []; + handlers.delegateCount = 0; + + // Only use addEventListener if the special events handler returns false + if ( !special.setup || + special.setup.call( elem, data, namespaces, eventHandle ) === false ) { + + if ( elem.addEventListener ) { + elem.addEventListener( type, eventHandle ); + } + } + } + + if ( special.add ) { + special.add.call( elem, handleObj ); + + if ( !handleObj.handler.guid ) { + handleObj.handler.guid = handler.guid; + } + } + + // Add to the element's handler list, delegates in front + if ( selector ) { + handlers.splice( handlers.delegateCount++, 0, handleObj ); + } else { + handlers.push( handleObj ); + } + + // Keep track of which events have ever been used, for event optimization + jQuery.event.global[ type ] = true; + } + + }, + + // Detach an event or set of events from an element + remove: function( elem, types, handler, selector, mappedTypes ) { + + var j, origCount, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.hasData( elem ) && dataPriv.get( elem ); + + if ( !elemData || !( events = elemData.events ) ) { + return; + } + + // Once for each type.namespace in types; type may be omitted + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); + + // Unbind all events (on this namespace, if provided) for the element + if ( !type ) { + for ( type in events ) { + jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); + } + continue; + } + + special = jQuery.event.special[ type ] || {}; + type = ( selector ? 
special.delegateType : special.bindType ) || type; + handlers = events[ type ] || []; + tmp = tmp[ 2 ] && + new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); + + // Remove matching events + origCount = j = handlers.length; + while ( j-- ) { + handleObj = handlers[ j ]; + + if ( ( mappedTypes || origType === handleObj.origType ) && + ( !handler || handler.guid === handleObj.guid ) && + ( !tmp || tmp.test( handleObj.namespace ) ) && + ( !selector || selector === handleObj.selector || + selector === "**" && handleObj.selector ) ) { + handlers.splice( j, 1 ); + + if ( handleObj.selector ) { + handlers.delegateCount--; + } + if ( special.remove ) { + special.remove.call( elem, handleObj ); + } + } + } + + // Remove generic event handler if we removed something and no more handlers exist + // (avoids potential for endless recursion during removal of special event handlers) + if ( origCount && !handlers.length ) { + if ( !special.teardown || + special.teardown.call( elem, namespaces, elemData.handle ) === false ) { + + jQuery.removeEvent( elem, type, elemData.handle ); + } + + delete events[ type ]; + } + } + + // Remove data and the expando if it's no longer used + if ( jQuery.isEmptyObject( events ) ) { + dataPriv.remove( elem, "handle events" ); + } + }, + + dispatch: function( nativeEvent ) { + + // Make a writable jQuery.Event from the native event object + var event = jQuery.event.fix( nativeEvent ); + + var i, j, ret, matched, handleObj, handlerQueue, + args = new Array( arguments.length ), + handlers = ( dataPriv.get( this, "events" ) || {} )[ event.type ] || [], + special = jQuery.event.special[ event.type ] || {}; + + // Use the fix-ed jQuery.Event rather than the (read-only) native event + args[ 0 ] = event; + + for ( i = 1; i < arguments.length; i++ ) { + args[ i ] = arguments[ i ]; + } + + event.delegateTarget = this; + + // Call the preDispatch hook for the mapped type, and let it bail if desired + if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { + return; + } + + // Determine handlers + handlerQueue = jQuery.event.handlers.call( this, event, handlers ); + + // Run delegates first; they may want to stop propagation beneath us + i = 0; + while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { + event.currentTarget = matched.elem; + + j = 0; + while ( ( handleObj = matched.handlers[ j++ ] ) && + !event.isImmediatePropagationStopped() ) { + + // If the event is namespaced, then each handler is only invoked if it is + // specially universal or its namespaces are a superset of the event's. 
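+				// e.g. an event dispatched as "click.menu" runs a handler bound as
+				// "click.menu.ui", but skips handlers bound as "click" or "click.other"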
+ if ( !event.rnamespace || handleObj.namespace === false || + event.rnamespace.test( handleObj.namespace ) ) { + + event.handleObj = handleObj; + event.data = handleObj.data; + + ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || + handleObj.handler ).apply( matched.elem, args ); + + if ( ret !== undefined ) { + if ( ( event.result = ret ) === false ) { + event.preventDefault(); + event.stopPropagation(); + } + } + } + } + } + + // Call the postDispatch hook for the mapped type + if ( special.postDispatch ) { + special.postDispatch.call( this, event ); + } + + return event.result; + }, + + handlers: function( event, handlers ) { + var i, handleObj, sel, matchedHandlers, matchedSelectors, + handlerQueue = [], + delegateCount = handlers.delegateCount, + cur = event.target; + + // Find delegate handlers + if ( delegateCount && + + // Support: IE <=9 + // Black-hole SVG instance trees (trac-13180) + cur.nodeType && + + // Support: Firefox <=42 + // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) + // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click + // Support: IE 11 only + // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) + !( event.type === "click" && event.button >= 1 ) ) { + + for ( ; cur !== this; cur = cur.parentNode || this ) { + + // Don't check non-elements (#13208) + // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) + if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { + matchedHandlers = []; + matchedSelectors = {}; + for ( i = 0; i < delegateCount; i++ ) { + handleObj = handlers[ i ]; + + // Don't conflict with Object.prototype properties (#13203) + sel = handleObj.selector + " "; + + if ( matchedSelectors[ sel ] === undefined ) { + matchedSelectors[ sel ] = handleObj.needsContext ? + jQuery( sel, this ).index( cur ) > -1 : + jQuery.find( sel, this, null, [ cur ] ).length; + } + if ( matchedSelectors[ sel ] ) { + matchedHandlers.push( handleObj ); + } + } + if ( matchedHandlers.length ) { + handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); + } + } + } + } + + // Add the remaining (directly-bound) handlers + cur = this; + if ( delegateCount < handlers.length ) { + handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); + } + + return handlerQueue; + }, + + addProp: function( name, hook ) { + Object.defineProperty( jQuery.Event.prototype, name, { + enumerable: true, + configurable: true, + + get: isFunction( hook ) ? + function() { + if ( this.originalEvent ) { + return hook( this.originalEvent ); + } + } : + function() { + if ( this.originalEvent ) { + return this.originalEvent[ name ]; + } + }, + + set: function( value ) { + Object.defineProperty( this, name, { + enumerable: true, + configurable: true, + writable: true, + value: value + } ); + } + } ); + }, + + fix: function( originalEvent ) { + return originalEvent[ jQuery.expando ] ? + originalEvent : + new jQuery.Event( originalEvent ); + }, + + special: { + load: { + + // Prevent triggered image.load events from bubbling to window.load + noBubble: true + }, + click: { + + // Utilize native event to ensure correct state for checkable inputs + setup: function( data ) { + + // For mutual compressibility with _default, replace `this` access with a local var. + // `|| data` is dead code meant only to preserve the variable through minification. 
+ var el = this || data; + + // Claim the first handler + if ( rcheckableType.test( el.type ) && + el.click && nodeName( el, "input" ) ) { + + // dataPriv.set( el, "click", ... ) + leverageNative( el, "click", returnTrue ); + } + + // Return false to allow normal processing in the caller + return false; + }, + trigger: function( data ) { + + // For mutual compressibility with _default, replace `this` access with a local var. + // `|| data` is dead code meant only to preserve the variable through minification. + var el = this || data; + + // Force setup before triggering a click + if ( rcheckableType.test( el.type ) && + el.click && nodeName( el, "input" ) ) { + + leverageNative( el, "click" ); + } + + // Return non-false to allow normal event-path propagation + return true; + }, + + // For cross-browser consistency, suppress native .click() on links + // Also prevent it if we're currently inside a leveraged native-event stack + _default: function( event ) { + var target = event.target; + return rcheckableType.test( target.type ) && + target.click && nodeName( target, "input" ) && + dataPriv.get( target, "click" ) || + nodeName( target, "a" ); + } + }, + + beforeunload: { + postDispatch: function( event ) { + + // Support: Firefox 20+ + // Firefox doesn't alert if the returnValue field is not set. + if ( event.result !== undefined && event.originalEvent ) { + event.originalEvent.returnValue = event.result; + } + } + } + } +}; + +// Ensure the presence of an event listener that handles manually-triggered +// synthetic events by interrupting progress until reinvoked in response to +// *native* events that it fires directly, ensuring that state changes have +// already occurred before other listeners are invoked. +function leverageNative( el, type, expectSync ) { + + // Missing expectSync indicates a trigger call, which must force setup through jQuery.event.add + if ( !expectSync ) { + if ( dataPriv.get( el, type ) === undefined ) { + jQuery.event.add( el, type, returnTrue ); + } + return; + } + + // Register the controller as a special universal handler for all event namespaces + dataPriv.set( el, type, false ); + jQuery.event.add( el, type, { + namespace: false, + handler: function( event ) { + var notAsync, result, + saved = dataPriv.get( this, type ); + + if ( ( event.isTrigger & 1 ) && this[ type ] ) { + + // Interrupt processing of the outer synthetic .trigger()ed event + // Saved data should be false in such cases, but might be a leftover capture object + // from an async native handler (gh-4350) + if ( !saved.length ) { + + // Store arguments for use when handling the inner native event + // There will always be at least one argument (an event object), so this array + // will not be confused with a leftover capture object. 
+ saved = slice.call( arguments ); + dataPriv.set( this, type, saved ); + + // Trigger the native event and capture its result + // Support: IE <=9 - 11+ + // focus() and blur() are asynchronous + notAsync = expectSync( this, type ); + this[ type ](); + result = dataPriv.get( this, type ); + if ( saved !== result || notAsync ) { + dataPriv.set( this, type, false ); + } else { + result = {}; + } + if ( saved !== result ) { + + // Cancel the outer synthetic event + event.stopImmediatePropagation(); + event.preventDefault(); + return result.value; + } + + // If this is an inner synthetic event for an event with a bubbling surrogate + // (focus or blur), assume that the surrogate already propagated from triggering the + // native event and prevent that from happening again here. + // This technically gets the ordering wrong w.r.t. to `.trigger()` (in which the + // bubbling surrogate propagates *after* the non-bubbling base), but that seems + // less bad than duplication. + } else if ( ( jQuery.event.special[ type ] || {} ).delegateType ) { + event.stopPropagation(); + } + + // If this is a native event triggered above, everything is now in order + // Fire an inner synthetic event with the original arguments + } else if ( saved.length ) { + + // ...and capture the result + dataPriv.set( this, type, { + value: jQuery.event.trigger( + + // Support: IE <=9 - 11+ + // Extend with the prototype to reset the above stopImmediatePropagation() + jQuery.extend( saved[ 0 ], jQuery.Event.prototype ), + saved.slice( 1 ), + this + ) + } ); + + // Abort handling of the native event + event.stopImmediatePropagation(); + } + } + } ); +} + +jQuery.removeEvent = function( elem, type, handle ) { + + // This "if" is needed for plain objects + if ( elem.removeEventListener ) { + elem.removeEventListener( type, handle ); + } +}; + +jQuery.Event = function( src, props ) { + + // Allow instantiation without the 'new' keyword + if ( !( this instanceof jQuery.Event ) ) { + return new jQuery.Event( src, props ); + } + + // Event object + if ( src && src.type ) { + this.originalEvent = src; + this.type = src.type; + + // Events bubbling up the document may have been marked as prevented + // by a handler lower down the tree; reflect the correct value. + this.isDefaultPrevented = src.defaultPrevented || + src.defaultPrevented === undefined && + + // Support: Android <=2.3 only + src.returnValue === false ? + returnTrue : + returnFalse; + + // Create target properties + // Support: Safari <=6 - 7 only + // Target should not be a text node (#504, #13143) + this.target = ( src.target && src.target.nodeType === 3 ) ? 
+ src.target.parentNode : + src.target; + + this.currentTarget = src.currentTarget; + this.relatedTarget = src.relatedTarget; + + // Event type + } else { + this.type = src; + } + + // Put explicitly provided properties onto the event object + if ( props ) { + jQuery.extend( this, props ); + } + + // Create a timestamp if incoming event doesn't have one + this.timeStamp = src && src.timeStamp || Date.now(); + + // Mark it as fixed + this[ jQuery.expando ] = true; +}; + +// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding +// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html +jQuery.Event.prototype = { + constructor: jQuery.Event, + isDefaultPrevented: returnFalse, + isPropagationStopped: returnFalse, + isImmediatePropagationStopped: returnFalse, + isSimulated: false, + + preventDefault: function() { + var e = this.originalEvent; + + this.isDefaultPrevented = returnTrue; + + if ( e && !this.isSimulated ) { + e.preventDefault(); + } + }, + stopPropagation: function() { + var e = this.originalEvent; + + this.isPropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopPropagation(); + } + }, + stopImmediatePropagation: function() { + var e = this.originalEvent; + + this.isImmediatePropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopImmediatePropagation(); + } + + this.stopPropagation(); + } +}; + +// Includes all common event props including KeyEvent and MouseEvent specific props +jQuery.each( { + altKey: true, + bubbles: true, + cancelable: true, + changedTouches: true, + ctrlKey: true, + detail: true, + eventPhase: true, + metaKey: true, + pageX: true, + pageY: true, + shiftKey: true, + view: true, + "char": true, + code: true, + charCode: true, + key: true, + keyCode: true, + button: true, + buttons: true, + clientX: true, + clientY: true, + offsetX: true, + offsetY: true, + pointerId: true, + pointerType: true, + screenX: true, + screenY: true, + targetTouches: true, + toElement: true, + touches: true, + + which: function( event ) { + var button = event.button; + + // Add which for key events + if ( event.which == null && rkeyEvent.test( event.type ) ) { + return event.charCode != null ? event.charCode : event.keyCode; + } + + // Add which for click: 1 === left; 2 === middle; 3 === right + if ( !event.which && button !== undefined && rmouseEvent.test( event.type ) ) { + if ( button & 1 ) { + return 1; + } + + if ( button & 2 ) { + return 3; + } + + if ( button & 4 ) { + return 2; + } + + return 0; + } + + return event.which; + } +}, jQuery.event.addProp ); + +jQuery.each( { focus: "focusin", blur: "focusout" }, function( type, delegateType ) { + jQuery.event.special[ type ] = { + + // Utilize native event if possible so blur/focus sequence is correct + setup: function() { + + // Claim the first handler + // dataPriv.set( this, "focus", ... ) + // dataPriv.set( this, "blur", ... ) + leverageNative( this, type, expectSync ); + + // Return false to allow normal processing in the caller + return false; + }, + trigger: function() { + + // Force setup before trigger + leverageNative( this, type ); + + // Return non-false to allow normal event-path propagation + return true; + }, + + delegateType: delegateType + }; +} ); + +// Create mouseenter/leave events using mouseover/out and event-time checks +// so that event delegation works in jQuery. 
+// Do the same for pointerenter/pointerleave and pointerover/pointerout
+//
+// Support: Safari 7 only
+// Safari sends mouseenter too often; see:
+// https://bugs.chromium.org/p/chromium/issues/detail?id=470258
+// for the description of the bug (it existed in older Chrome versions as well).
+jQuery.each( {
+	mouseenter: "mouseover",
+	mouseleave: "mouseout",
+	pointerenter: "pointerover",
+	pointerleave: "pointerout"
+}, function( orig, fix ) {
+	jQuery.event.special[ orig ] = {
+		delegateType: fix,
+		bindType: fix,
+
+		handle: function( event ) {
+			var ret,
+				target = this,
+				related = event.relatedTarget,
+				handleObj = event.handleObj;
+
+			// For mouseenter/leave call the handler if related is outside the target.
+			// NB: No relatedTarget if the mouse left/entered the browser window
+			if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) {
+				event.type = handleObj.origType;
+				ret = handleObj.handler.apply( this, arguments );
+				event.type = fix;
+			}
+			return ret;
+		}
+	};
+} );
+
+jQuery.fn.extend( {
+
+	on: function( types, selector, data, fn ) {
+		return on( this, types, selector, data, fn );
+	},
+	one: function( types, selector, data, fn ) {
+		return on( this, types, selector, data, fn, 1 );
+	},
+	off: function( types, selector, fn ) {
+		var handleObj, type;
+		if ( types && types.preventDefault && types.handleObj ) {
+
+			// ( event )  dispatched jQuery.Event
+			handleObj = types.handleObj;
+			jQuery( types.delegateTarget ).off(
+				handleObj.namespace ?
+					handleObj.origType + "." + handleObj.namespace :
+					handleObj.origType,
+				handleObj.selector,
+				handleObj.handler
+			);
+			return this;
+		}
+		if ( typeof types === "object" ) {
+
+			// ( types-object [, selector] )
+			for ( type in types ) {
+				this.off( type, selector, types[ type ] );
+			}
+			return this;
+		}
+		if ( selector === false || typeof selector === "function" ) {
+
+			// ( types [, fn] )
+			fn = selector;
+			selector = undefined;
+		}
+		if ( fn === false ) {
+			fn = returnFalse;
+		}
+		return this.each( function() {
+			jQuery.event.remove( this, types, fn, selector );
+		} );
+	}
+} );
+
+
+var
+
+	/* eslint-disable max-len */
+
+	// See https://github.com/eslint/eslint/issues/3229
+	rxhtmlTag = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([a-z][^\/\0>\x20\t\r\n\f]*)[^>]*)\/>/gi,
+
+	/* eslint-enable */
+
+	// Support: IE <=10 - 11, Edge 12 - 13 only
+	// In IE/Edge using regex groups here causes severe slowdowns.
+	// See https://connect.microsoft.com/IE/feedback/details/1736512/
+	rnoInnerhtml = /<script|<style|<link/i,
+
+	// checked="checked" or checked
+	rchecked = /checked\s*(?:[^=]|=\s*.checked.)/i,
+
+	rcleanScript = /^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g;
+
+// Prefer a tbody over its parent table for containing new rows
+function manipulationTarget( elem, content ) {
+	if ( nodeName( elem, "table" ) &&
+		nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ) {
+
+		return jQuery( elem ).children( "tbody" )[ 0 ] || elem;
+	}
+
+	return elem;
+}
+
+// Replace/restore the type attribute of script elements for safe DOM manipulation
+function disableScript( elem ) {
+	elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type;
+	return elem;
+}
+function restoreScript( elem ) {
+	if ( ( elem.type || "" ).slice( 0, 5 ) === "true/" ) {
+		elem.type = elem.type.slice( 5 );
+	} else {
+		elem.removeAttribute( "type" );
+	}
+
+	return elem;
+}
+
+function cloneCopyEvent( src, dest ) {
+	var i, l, type, pdataOld, pdataCur, udataOld, udataCur, events;
+
+	if ( dest.nodeType !== 1 ) {
+		return;
+	}
+
+	// 1. Copy private data: events, handlers, etc.
+ if ( dataPriv.hasData( src ) ) { + pdataOld = dataPriv.access( src ); + pdataCur = dataPriv.set( dest, pdataOld ); + events = pdataOld.events; + + if ( events ) { + delete pdataCur.handle; + pdataCur.events = {}; + + for ( type in events ) { + for ( i = 0, l = events[ type ].length; i < l; i++ ) { + jQuery.event.add( dest, type, events[ type ][ i ] ); + } + } + } + } + + // 2. Copy user data + if ( dataUser.hasData( src ) ) { + udataOld = dataUser.access( src ); + udataCur = jQuery.extend( {}, udataOld ); + + dataUser.set( dest, udataCur ); + } +} + +// Fix IE bugs, see support tests +function fixInput( src, dest ) { + var nodeName = dest.nodeName.toLowerCase(); + + // Fails to persist the checked state of a cloned checkbox or radio button. + if ( nodeName === "input" && rcheckableType.test( src.type ) ) { + dest.checked = src.checked; + + // Fails to return the selected option to the default selected state when cloning options + } else if ( nodeName === "input" || nodeName === "textarea" ) { + dest.defaultValue = src.defaultValue; + } +} + +function domManip( collection, args, callback, ignored ) { + + // Flatten any nested arrays + args = concat.apply( [], args ); + + var fragment, first, scripts, hasScripts, node, doc, + i = 0, + l = collection.length, + iNoClone = l - 1, + value = args[ 0 ], + valueIsFunction = isFunction( value ); + + // We can't cloneNode fragments that contain checked, in WebKit + if ( valueIsFunction || + ( l > 1 && typeof value === "string" && + !support.checkClone && rchecked.test( value ) ) ) { + return collection.each( function( index ) { + var self = collection.eq( index ); + if ( valueIsFunction ) { + args[ 0 ] = value.call( this, index, self.html() ); + } + domManip( self, args, callback, ignored ); + } ); + } + + if ( l ) { + fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); + first = fragment.firstChild; + + if ( fragment.childNodes.length === 1 ) { + fragment = first; + } + + // Require either new content or an interest in ignored elements to invoke the callback + if ( first || ignored ) { + scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); + hasScripts = scripts.length; + + // Use the original fragment for the last item + // instead of the first because it can end up + // being emptied incorrectly in certain situations (#8070). 
+ for ( ; i < l; i++ ) { + node = fragment; + + if ( i !== iNoClone ) { + node = jQuery.clone( node, true, true ); + + // Keep references to cloned scripts for later restoration + if ( hasScripts ) { + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( scripts, getAll( node, "script" ) ); + } + } + + callback.call( collection[ i ], node, i ); + } + + if ( hasScripts ) { + doc = scripts[ scripts.length - 1 ].ownerDocument; + + // Reenable scripts + jQuery.map( scripts, restoreScript ); + + // Evaluate executable scripts on first document insertion + for ( i = 0; i < hasScripts; i++ ) { + node = scripts[ i ]; + if ( rscriptType.test( node.type || "" ) && + !dataPriv.access( node, "globalEval" ) && + jQuery.contains( doc, node ) ) { + + if ( node.src && ( node.type || "" ).toLowerCase() !== "module" ) { + + // Optional AJAX dependency, but won't run scripts if not present + if ( jQuery._evalUrl && !node.noModule ) { + jQuery._evalUrl( node.src, { + nonce: node.nonce || node.getAttribute( "nonce" ) + } ); + } + } else { + DOMEval( node.textContent.replace( rcleanScript, "" ), node, doc ); + } + } + } + } + } + } + + return collection; +} + +function remove( elem, selector, keepData ) { + var node, + nodes = selector ? jQuery.filter( selector, elem ) : elem, + i = 0; + + for ( ; ( node = nodes[ i ] ) != null; i++ ) { + if ( !keepData && node.nodeType === 1 ) { + jQuery.cleanData( getAll( node ) ); + } + + if ( node.parentNode ) { + if ( keepData && isAttached( node ) ) { + setGlobalEval( getAll( node, "script" ) ); + } + node.parentNode.removeChild( node ); + } + } + + return elem; +} + +jQuery.extend( { + htmlPrefilter: function( html ) { + return html.replace( rxhtmlTag, "<$1>" ); + }, + + clone: function( elem, dataAndEvents, deepDataAndEvents ) { + var i, l, srcElements, destElements, + clone = elem.cloneNode( true ), + inPage = isAttached( elem ); + + // Fix IE cloning issues + if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) && + !jQuery.isXMLDoc( elem ) ) { + + // We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2 + destElements = getAll( clone ); + srcElements = getAll( elem ); + + for ( i = 0, l = srcElements.length; i < l; i++ ) { + fixInput( srcElements[ i ], destElements[ i ] ); + } + } + + // Copy the events from the original to the clone + if ( dataAndEvents ) { + if ( deepDataAndEvents ) { + srcElements = srcElements || getAll( elem ); + destElements = destElements || getAll( clone ); + + for ( i = 0, l = srcElements.length; i < l; i++ ) { + cloneCopyEvent( srcElements[ i ], destElements[ i ] ); + } + } else { + cloneCopyEvent( elem, clone ); + } + } + + // Preserve script evaluation history + destElements = getAll( clone, "script" ); + if ( destElements.length > 0 ) { + setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); + } + + // Return the cloned set + return clone; + }, + + cleanData: function( elems ) { + var data, elem, type, + special = jQuery.event.special, + i = 0; + + for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) { + if ( acceptData( elem ) ) { + if ( ( data = elem[ dataPriv.expando ] ) ) { + if ( data.events ) { + for ( type in data.events ) { + if ( special[ type ] ) { + jQuery.event.remove( elem, type ); + + // This is a shortcut to avoid jQuery.event.remove's overhead + } else { + jQuery.removeEvent( elem, type, data.handle ); + } + } + } + + // Support: Chrome <=35 - 45+ + // Assign undefined instead of using 
delete, see Data#remove + elem[ dataPriv.expando ] = undefined; + } + if ( elem[ dataUser.expando ] ) { + + // Support: Chrome <=35 - 45+ + // Assign undefined instead of using delete, see Data#remove + elem[ dataUser.expando ] = undefined; + } + } + } + } +} ); + +jQuery.fn.extend( { + detach: function( selector ) { + return remove( this, selector, true ); + }, + + remove: function( selector ) { + return remove( this, selector ); + }, + + text: function( value ) { + return access( this, function( value ) { + return value === undefined ? + jQuery.text( this ) : + this.empty().each( function() { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + this.textContent = value; + } + } ); + }, null, value, arguments.length ); + }, + + append: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.appendChild( elem ); + } + } ); + }, + + prepend: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.insertBefore( elem, target.firstChild ); + } + } ); + }, + + before: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this ); + } + } ); + }, + + after: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this.nextSibling ); + } + } ); + }, + + empty: function() { + var elem, + i = 0; + + for ( ; ( elem = this[ i ] ) != null; i++ ) { + if ( elem.nodeType === 1 ) { + + // Prevent memory leaks + jQuery.cleanData( getAll( elem, false ) ); + + // Remove any remaining nodes + elem.textContent = ""; + } + } + + return this; + }, + + clone: function( dataAndEvents, deepDataAndEvents ) { + dataAndEvents = dataAndEvents == null ? false : dataAndEvents; + deepDataAndEvents = deepDataAndEvents == null ? 
dataAndEvents : deepDataAndEvents; + + return this.map( function() { + return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); + } ); + }, + + html: function( value ) { + return access( this, function( value ) { + var elem = this[ 0 ] || {}, + i = 0, + l = this.length; + + if ( value === undefined && elem.nodeType === 1 ) { + return elem.innerHTML; + } + + // See if we can take a shortcut and just use innerHTML + if ( typeof value === "string" && !rnoInnerhtml.test( value ) && + !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { + + value = jQuery.htmlPrefilter( value ); + + try { + for ( ; i < l; i++ ) { + elem = this[ i ] || {}; + + // Remove element nodes and prevent memory leaks + if ( elem.nodeType === 1 ) { + jQuery.cleanData( getAll( elem, false ) ); + elem.innerHTML = value; + } + } + + elem = 0; + + // If using innerHTML throws an exception, use the fallback method + } catch ( e ) {} + } + + if ( elem ) { + this.empty().append( value ); + } + }, null, value, arguments.length ); + }, + + replaceWith: function() { + var ignored = []; + + // Make the changes, replacing each non-ignored context element with the new content + return domManip( this, arguments, function( elem ) { + var parent = this.parentNode; + + if ( jQuery.inArray( this, ignored ) < 0 ) { + jQuery.cleanData( getAll( this ) ); + if ( parent ) { + parent.replaceChild( elem, this ); + } + } + + // Force callback invocation + }, ignored ); + } +} ); + +jQuery.each( { + appendTo: "append", + prependTo: "prepend", + insertBefore: "before", + insertAfter: "after", + replaceAll: "replaceWith" +}, function( name, original ) { + jQuery.fn[ name ] = function( selector ) { + var elems, + ret = [], + insert = jQuery( selector ), + last = insert.length - 1, + i = 0; + + for ( ; i <= last; i++ ) { + elems = i === last ? this : this.clone( true ); + jQuery( insert[ i ] )[ original ]( elems ); + + // Support: Android <=4.0 only, PhantomJS 1 only + // .get() because push.apply(_, arraylike) throws on ancient WebKit + push.apply( ret, elems.get() ); + } + + return this.pushStack( ret ); + }; +} ); +var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); + +var getStyles = function( elem ) { + + // Support: IE <=11 only, Firefox <=30 (#15098, #14150) + // IE throws on elements created in popups + // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" + var view = elem.ownerDocument.defaultView; + + if ( !view || !view.opener ) { + view = window; + } + + return view.getComputedStyle( elem ); + }; + +var rboxStyle = new RegExp( cssExpand.join( "|" ), "i" ); + + + +( function() { + + // Executing both pixelPosition & boxSizingReliable tests require only one layout + // so they're executed at the same time to save the second computation. 
+ function computeStyleTests() { + + // This is a singleton, we need to execute it only once + if ( !div ) { + return; + } + + container.style.cssText = "position:absolute;left:-11111px;width:60px;" + + "margin-top:1px;padding:0;border:0"; + div.style.cssText = + "position:relative;display:block;box-sizing:border-box;overflow:scroll;" + + "margin:auto;border:1px;padding:1px;" + + "width:60%;top:1%"; + documentElement.appendChild( container ).appendChild( div ); + + var divStyle = window.getComputedStyle( div ); + pixelPositionVal = divStyle.top !== "1%"; + + // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 + reliableMarginLeftVal = roundPixelMeasures( divStyle.marginLeft ) === 12; + + // Support: Android 4.0 - 4.3 only, Safari <=9.1 - 10.1, iOS <=7.0 - 9.3 + // Some styles come back with percentage values, even though they shouldn't + div.style.right = "60%"; + pixelBoxStylesVal = roundPixelMeasures( divStyle.right ) === 36; + + // Support: IE 9 - 11 only + // Detect misreporting of content dimensions for box-sizing:border-box elements + boxSizingReliableVal = roundPixelMeasures( divStyle.width ) === 36; + + // Support: IE 9 only + // Detect overflow:scroll screwiness (gh-3699) + // Support: Chrome <=64 + // Don't get tricked when zoom affects offsetWidth (gh-4029) + div.style.position = "absolute"; + scrollboxSizeVal = roundPixelMeasures( div.offsetWidth / 3 ) === 12; + + documentElement.removeChild( container ); + + // Nullify the div so it wouldn't be stored in the memory and + // it will also be a sign that checks already performed + div = null; + } + + function roundPixelMeasures( measure ) { + return Math.round( parseFloat( measure ) ); + } + + var pixelPositionVal, boxSizingReliableVal, scrollboxSizeVal, pixelBoxStylesVal, + reliableMarginLeftVal, + container = document.createElement( "div" ), + div = document.createElement( "div" ); + + // Finish early in limited (non-browser) environments + if ( !div.style ) { + return; + } + + // Support: IE <=9 - 11 only + // Style of cloned element affects source element cloned (#8908) + div.style.backgroundClip = "content-box"; + div.cloneNode( true ).style.backgroundClip = ""; + support.clearCloneStyle = div.style.backgroundClip === "content-box"; + + jQuery.extend( support, { + boxSizingReliable: function() { + computeStyleTests(); + return boxSizingReliableVal; + }, + pixelBoxStyles: function() { + computeStyleTests(); + return pixelBoxStylesVal; + }, + pixelPosition: function() { + computeStyleTests(); + return pixelPositionVal; + }, + reliableMarginLeft: function() { + computeStyleTests(); + return reliableMarginLeftVal; + }, + scrollboxSize: function() { + computeStyleTests(); + return scrollboxSizeVal; + } + } ); +} )(); + + +function curCSS( elem, name, computed ) { + var width, minWidth, maxWidth, ret, + + // Support: Firefox 51+ + // Retrieving style before computed somehow + // fixes an issue with getting wrong values + // on detached elements + style = elem.style; + + computed = computed || getStyles( elem ); + + // getPropertyValue is needed for: + // .css('filter') (IE 9 only, #12537) + // .css('--customProperty) (#3144) + if ( computed ) { + ret = computed.getPropertyValue( name ) || computed[ name ]; + + if ( ret === "" && !isAttached( elem ) ) { + ret = jQuery.style( elem, name ); + } + + // A tribute to the "awesome hack by Dean Edwards" + // Android Browser returns percentage for some values, + // but width seems to be reliably pixels. 
+ // This is against the CSSOM draft spec: + // https://drafts.csswg.org/cssom/#resolved-values + if ( !support.pixelBoxStyles() && rnumnonpx.test( ret ) && rboxStyle.test( name ) ) { + + // Remember the original values + width = style.width; + minWidth = style.minWidth; + maxWidth = style.maxWidth; + + // Put in the new values to get a computed value out + style.minWidth = style.maxWidth = style.width = ret; + ret = computed.width; + + // Revert the changed values + style.width = width; + style.minWidth = minWidth; + style.maxWidth = maxWidth; + } + } + + return ret !== undefined ? + + // Support: IE <=9 - 11 only + // IE returns zIndex value as an integer. + ret + "" : + ret; +} + + +function addGetHookIf( conditionFn, hookFn ) { + + // Define the hook, we'll check on the first run if it's really needed. + return { + get: function() { + if ( conditionFn() ) { + + // Hook not needed (or it's not possible to use it due + // to missing dependency), remove it. + delete this.get; + return; + } + + // Hook needed; redefine it so that the support test is not executed again. + return ( this.get = hookFn ).apply( this, arguments ); + } + }; +} + + +var cssPrefixes = [ "Webkit", "Moz", "ms" ], + emptyStyle = document.createElement( "div" ).style, + vendorProps = {}; + +// Return a vendor-prefixed property or undefined +function vendorPropName( name ) { + + // Check for vendor prefixed names + var capName = name[ 0 ].toUpperCase() + name.slice( 1 ), + i = cssPrefixes.length; + + while ( i-- ) { + name = cssPrefixes[ i ] + capName; + if ( name in emptyStyle ) { + return name; + } + } +} + +// Return a potentially-mapped jQuery.cssProps or vendor prefixed property +function finalPropName( name ) { + var final = jQuery.cssProps[ name ] || vendorProps[ name ]; + + if ( final ) { + return final; + } + if ( name in emptyStyle ) { + return name; + } + return vendorProps[ name ] = vendorPropName( name ) || name; +} + + +var + + // Swappable if display is none or starts with table + // except "table", "table-cell", or "table-caption" + // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display + rdisplayswap = /^(none|table(?!-c[ea]).+)/, + rcustomProp = /^--/, + cssShow = { position: "absolute", visibility: "hidden", display: "block" }, + cssNormalTransform = { + letterSpacing: "0", + fontWeight: "400" + }; + +function setPositiveNumber( elem, value, subtract ) { + + // Any relative (+/-) values have already been + // normalized at this point + var matches = rcssNum.exec( value ); + return matches ? + + // Guard against undefined "subtract", e.g., when used as in cssHooks + Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) : + value; +} + +function boxModelAdjustment( elem, dimension, box, isBorderBox, styles, computedVal ) { + var i = dimension === "width" ? 1 : 0, + extra = 0, + delta = 0; + + // Adjustment may not be necessary + if ( box === ( isBorderBox ? 
"border" : "content" ) ) { + return 0; + } + + for ( ; i < 4; i += 2 ) { + + // Both box models exclude margin + if ( box === "margin" ) { + delta += jQuery.css( elem, box + cssExpand[ i ], true, styles ); + } + + // If we get here with a content-box, we're seeking "padding" or "border" or "margin" + if ( !isBorderBox ) { + + // Add padding + delta += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); + + // For "border" or "margin", add border + if ( box !== "padding" ) { + delta += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + + // But still keep track of it otherwise + } else { + extra += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + } + + // If we get here with a border-box (content + padding + border), we're seeking "content" or + // "padding" or "margin" + } else { + + // For "content", subtract padding + if ( box === "content" ) { + delta -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); + } + + // For "content" or "padding", subtract border + if ( box !== "margin" ) { + delta -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + } + } + } + + // Account for positive content-box scroll gutter when requested by providing computedVal + if ( !isBorderBox && computedVal >= 0 ) { + + // offsetWidth/offsetHeight is a rounded sum of content, padding, scroll gutter, and border + // Assuming integer scroll gutter, subtract the rest and round down + delta += Math.max( 0, Math.ceil( + elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - + computedVal - + delta - + extra - + 0.5 + + // If offsetWidth/offsetHeight is unknown, then we can't determine content-box scroll gutter + // Use an explicit zero to avoid NaN (gh-3964) + ) ) || 0; + } + + return delta; +} + +function getWidthOrHeight( elem, dimension, extra ) { + + // Start with computed style + var styles = getStyles( elem ), + + // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-4322). + // Fake content-box until we know it's needed to know the true value. + boxSizingNeeded = !support.boxSizingReliable() || extra, + isBorderBox = boxSizingNeeded && + jQuery.css( elem, "boxSizing", false, styles ) === "border-box", + valueIsBorderBox = isBorderBox, + + val = curCSS( elem, dimension, styles ), + offsetProp = "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ); + + // Support: Firefox <=54 + // Return a confounding non-pixel value or feign ignorance, as appropriate. + if ( rnumnonpx.test( val ) ) { + if ( !extra ) { + return val; + } + val = "auto"; + } + + + // Fall back to offsetWidth/offsetHeight when value is "auto" + // This happens for inline elements with no explicit setting (gh-3571) + // Support: Android <=4.1 - 4.3 only + // Also use offsetWidth/offsetHeight for misreported inline dimensions (gh-3602) + // Support: IE 9-11 only + // Also use offsetWidth/offsetHeight for when box sizing is unreliable + // We use getClientRects() to check for hidden/disconnected. + // In those cases, the computed value can be trusted to be border-box + if ( ( !support.boxSizingReliable() && isBorderBox || + val === "auto" || + !parseFloat( val ) && jQuery.css( elem, "display", false, styles ) === "inline" ) && + elem.getClientRects().length ) { + + isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; + + // Where available, offsetWidth/offsetHeight approximate border box dimensions. 
+ // Where not available (e.g., SVG), assume unreliable box-sizing and interpret the + // retrieved value as a content box dimension. + valueIsBorderBox = offsetProp in elem; + if ( valueIsBorderBox ) { + val = elem[ offsetProp ]; + } + } + + // Normalize "" and auto + val = parseFloat( val ) || 0; + + // Adjust for the element's box model + return ( val + + boxModelAdjustment( + elem, + dimension, + extra || ( isBorderBox ? "border" : "content" ), + valueIsBorderBox, + styles, + + // Provide the current computed size to request scroll gutter calculation (gh-3589) + val + ) + ) + "px"; +} + +jQuery.extend( { + + // Add in style property hooks for overriding the default + // behavior of getting and setting a style property + cssHooks: { + opacity: { + get: function( elem, computed ) { + if ( computed ) { + + // We should always get a number back from opacity + var ret = curCSS( elem, "opacity" ); + return ret === "" ? "1" : ret; + } + } + } + }, + + // Don't automatically add "px" to these possibly-unitless properties + cssNumber: { + "animationIterationCount": true, + "columnCount": true, + "fillOpacity": true, + "flexGrow": true, + "flexShrink": true, + "fontWeight": true, + "gridArea": true, + "gridColumn": true, + "gridColumnEnd": true, + "gridColumnStart": true, + "gridRow": true, + "gridRowEnd": true, + "gridRowStart": true, + "lineHeight": true, + "opacity": true, + "order": true, + "orphans": true, + "widows": true, + "zIndex": true, + "zoom": true + }, + + // Add in properties whose names you wish to fix before + // setting or getting the value + cssProps: {}, + + // Get and set the style property on a DOM Node + style: function( elem, name, value, extra ) { + + // Don't set styles on text and comment nodes + if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { + return; + } + + // Make sure that we're working with the right name + var ret, type, hooks, + origName = camelCase( name ), + isCustomProp = rcustomProp.test( name ), + style = elem.style; + + // Make sure that we're working with the right name. We don't + // want to query the value if it is a CSS custom property + // since they are user-defined. + if ( !isCustomProp ) { + name = finalPropName( origName ); + } + + // Gets hook for the prefixed version, then unprefixed version + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // Check if we're setting a value + if ( value !== undefined ) { + type = typeof value; + + // Convert "+=" or "-=" to relative numbers (#7345) + if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { + value = adjustCSS( elem, name, ret ); + + // Fixes bug #9237 + type = "number"; + } + + // Make sure that null and NaN values aren't set (#7116) + if ( value == null || value !== value ) { + return; + } + + // If a number was passed in, add the unit (except for certain CSS properties) + // The isCustomProp check can be removed in jQuery 4.0 when we only auto-append + // "px" to a few hardcoded values. + if ( type === "number" && !isCustomProp ) { + value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? 
"" : "px" ); + } + + // background-* props affect original clone's values + if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { + style[ name ] = "inherit"; + } + + // If a hook was provided, use that value, otherwise just set the specified value + if ( !hooks || !( "set" in hooks ) || + ( value = hooks.set( elem, value, extra ) ) !== undefined ) { + + if ( isCustomProp ) { + style.setProperty( name, value ); + } else { + style[ name ] = value; + } + } + + } else { + + // If a hook was provided get the non-computed value from there + if ( hooks && "get" in hooks && + ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { + + return ret; + } + + // Otherwise just get the value from the style object + return style[ name ]; + } + }, + + css: function( elem, name, extra, styles ) { + var val, num, hooks, + origName = camelCase( name ), + isCustomProp = rcustomProp.test( name ); + + // Make sure that we're working with the right name. We don't + // want to modify the value if it is a CSS custom property + // since they are user-defined. + if ( !isCustomProp ) { + name = finalPropName( origName ); + } + + // Try prefixed name followed by the unprefixed name + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // If a hook was provided get the computed value from there + if ( hooks && "get" in hooks ) { + val = hooks.get( elem, true, extra ); + } + + // Otherwise, if a way to get the computed value exists, use that + if ( val === undefined ) { + val = curCSS( elem, name, styles ); + } + + // Convert "normal" to computed value + if ( val === "normal" && name in cssNormalTransform ) { + val = cssNormalTransform[ name ]; + } + + // Make numeric if forced or a qualifier was provided and val looks numeric + if ( extra === "" || extra ) { + num = parseFloat( val ); + return extra === true || isFinite( num ) ? num || 0 : val; + } + + return val; + } +} ); + +jQuery.each( [ "height", "width" ], function( i, dimension ) { + jQuery.cssHooks[ dimension ] = { + get: function( elem, computed, extra ) { + if ( computed ) { + + // Certain elements can have dimension info if we invisibly show them + // but it must have a current display style that would benefit + return rdisplayswap.test( jQuery.css( elem, "display" ) ) && + + // Support: Safari 8+ + // Table columns in Safari have non-zero offsetWidth & zero + // getBoundingClientRect().width unless display is changed. + // Support: IE <=11 only + // Running getBoundingClientRect on a disconnected node + // in IE throws an error. + ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? + swap( elem, cssShow, function() { + return getWidthOrHeight( elem, dimension, extra ); + } ) : + getWidthOrHeight( elem, dimension, extra ); + } + }, + + set: function( elem, value, extra ) { + var matches, + styles = getStyles( elem ), + + // Only read styles.position if the test has a chance to fail + // to avoid forcing a reflow. + scrollboxSizeBuggy = !support.scrollboxSize() && + styles.position === "absolute", + + // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-3991) + boxSizingNeeded = scrollboxSizeBuggy || extra, + isBorderBox = boxSizingNeeded && + jQuery.css( elem, "boxSizing", false, styles ) === "border-box", + subtract = extra ? 
+ boxModelAdjustment( + elem, + dimension, + extra, + isBorderBox, + styles + ) : + 0; + + // Account for unreliable border-box dimensions by comparing offset* to computed and + // faking a content-box to get border and padding (gh-3699) + if ( isBorderBox && scrollboxSizeBuggy ) { + subtract -= Math.ceil( + elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - + parseFloat( styles[ dimension ] ) - + boxModelAdjustment( elem, dimension, "border", false, styles ) - + 0.5 + ); + } + + // Convert to pixels if value adjustment is needed + if ( subtract && ( matches = rcssNum.exec( value ) ) && + ( matches[ 3 ] || "px" ) !== "px" ) { + + elem.style[ dimension ] = value; + value = jQuery.css( elem, dimension ); + } + + return setPositiveNumber( elem, value, subtract ); + } + }; +} ); + +jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, + function( elem, computed ) { + if ( computed ) { + return ( parseFloat( curCSS( elem, "marginLeft" ) ) || + elem.getBoundingClientRect().left - + swap( elem, { marginLeft: 0 }, function() { + return elem.getBoundingClientRect().left; + } ) + ) + "px"; + } + } +); + +// These hooks are used by animate to expand properties +jQuery.each( { + margin: "", + padding: "", + border: "Width" +}, function( prefix, suffix ) { + jQuery.cssHooks[ prefix + suffix ] = { + expand: function( value ) { + var i = 0, + expanded = {}, + + // Assumes a single number if not a string + parts = typeof value === "string" ? value.split( " " ) : [ value ]; + + for ( ; i < 4; i++ ) { + expanded[ prefix + cssExpand[ i ] + suffix ] = + parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; + } + + return expanded; + } + }; + + if ( prefix !== "margin" ) { + jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; + } +} ); + +jQuery.fn.extend( { + css: function( name, value ) { + return access( this, function( elem, name, value ) { + var styles, len, + map = {}, + i = 0; + + if ( Array.isArray( name ) ) { + styles = getStyles( elem ); + len = name.length; + + for ( ; i < len; i++ ) { + map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); + } + + return map; + } + + return value !== undefined ? + jQuery.style( elem, name, value ) : + jQuery.css( elem, name ); + }, name, value, arguments.length > 1 ); + } +} ); + + +function Tween( elem, options, prop, end, easing ) { + return new Tween.prototype.init( elem, options, prop, end, easing ); +} +jQuery.Tween = Tween; + +Tween.prototype = { + constructor: Tween, + init: function( elem, options, prop, end, easing, unit ) { + this.elem = elem; + this.prop = prop; + this.easing = easing || jQuery.easing._default; + this.options = options; + this.start = this.now = this.cur(); + this.end = end; + this.unit = unit || ( jQuery.cssNumber[ prop ] ? "" : "px" ); + }, + cur: function() { + var hooks = Tween.propHooks[ this.prop ]; + + return hooks && hooks.get ? 
+ hooks.get( this ) : + Tween.propHooks._default.get( this ); + }, + run: function( percent ) { + var eased, + hooks = Tween.propHooks[ this.prop ]; + + if ( this.options.duration ) { + this.pos = eased = jQuery.easing[ this.easing ]( + percent, this.options.duration * percent, 0, 1, this.options.duration + ); + } else { + this.pos = eased = percent; + } + this.now = ( this.end - this.start ) * eased + this.start; + + if ( this.options.step ) { + this.options.step.call( this.elem, this.now, this ); + } + + if ( hooks && hooks.set ) { + hooks.set( this ); + } else { + Tween.propHooks._default.set( this ); + } + return this; + } +}; + +Tween.prototype.init.prototype = Tween.prototype; + +Tween.propHooks = { + _default: { + get: function( tween ) { + var result; + + // Use a property on the element directly when it is not a DOM element, + // or when there is no matching style property that exists. + if ( tween.elem.nodeType !== 1 || + tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { + return tween.elem[ tween.prop ]; + } + + // Passing an empty string as a 3rd parameter to .css will automatically + // attempt a parseFloat and fallback to a string if the parse fails. + // Simple values such as "10px" are parsed to Float; + // complex values such as "rotate(1rad)" are returned as-is. + result = jQuery.css( tween.elem, tween.prop, "" ); + + // Empty strings, null, undefined and "auto" are converted to 0. + return !result || result === "auto" ? 0 : result; + }, + set: function( tween ) { + + // Use step hook for back compat. + // Use cssHook if its there. + // Use .style if available and use plain properties where available. + if ( jQuery.fx.step[ tween.prop ] ) { + jQuery.fx.step[ tween.prop ]( tween ); + } else if ( tween.elem.nodeType === 1 && ( + jQuery.cssHooks[ tween.prop ] || + tween.elem.style[ finalPropName( tween.prop ) ] != null ) ) { + jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); + } else { + tween.elem[ tween.prop ] = tween.now; + } + } + } +}; + +// Support: IE <=9 only +// Panic based approach to setting things on disconnected nodes +Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { + set: function( tween ) { + if ( tween.elem.nodeType && tween.elem.parentNode ) { + tween.elem[ tween.prop ] = tween.now; + } + } +}; + +jQuery.easing = { + linear: function( p ) { + return p; + }, + swing: function( p ) { + return 0.5 - Math.cos( p * Math.PI ) / 2; + }, + _default: "swing" +}; + +jQuery.fx = Tween.prototype.init; + +// Back compat <1.8 extension point +jQuery.fx.step = {}; + + + + +var + fxNow, inProgress, + rfxtypes = /^(?:toggle|show|hide)$/, + rrun = /queueHooks$/; + +function schedule() { + if ( inProgress ) { + if ( document.hidden === false && window.requestAnimationFrame ) { + window.requestAnimationFrame( schedule ); + } else { + window.setTimeout( schedule, jQuery.fx.interval ); + } + + jQuery.fx.tick(); + } +} + +// Animations created synchronously will run synchronously +function createFxNow() { + window.setTimeout( function() { + fxNow = undefined; + } ); + return ( fxNow = Date.now() ); +} + +// Generate parameters to create a standard animation +function genFx( type, includeWidth ) { + var which, + i = 0, + attrs = { height: type }; + + // If we include width, step value is 1 to do all cssExpand values, + // otherwise step value is 2 to skip over Left and Right + includeWidth = includeWidth ? 
1 : 0; + for ( ; i < 4; i += 2 - includeWidth ) { + which = cssExpand[ i ]; + attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; + } + + if ( includeWidth ) { + attrs.opacity = attrs.width = type; + } + + return attrs; +} + +function createTween( value, prop, animation ) { + var tween, + collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), + index = 0, + length = collection.length; + for ( ; index < length; index++ ) { + if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { + + // We're done with this property + return tween; + } + } +} + +function defaultPrefilter( elem, props, opts ) { + var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, + isBox = "width" in props || "height" in props, + anim = this, + orig = {}, + style = elem.style, + hidden = elem.nodeType && isHiddenWithinTree( elem ), + dataShow = dataPriv.get( elem, "fxshow" ); + + // Queue-skipping animations hijack the fx hooks + if ( !opts.queue ) { + hooks = jQuery._queueHooks( elem, "fx" ); + if ( hooks.unqueued == null ) { + hooks.unqueued = 0; + oldfire = hooks.empty.fire; + hooks.empty.fire = function() { + if ( !hooks.unqueued ) { + oldfire(); + } + }; + } + hooks.unqueued++; + + anim.always( function() { + + // Ensure the complete handler is called before this completes + anim.always( function() { + hooks.unqueued--; + if ( !jQuery.queue( elem, "fx" ).length ) { + hooks.empty.fire(); + } + } ); + } ); + } + + // Detect show/hide animations + for ( prop in props ) { + value = props[ prop ]; + if ( rfxtypes.test( value ) ) { + delete props[ prop ]; + toggle = toggle || value === "toggle"; + if ( value === ( hidden ? "hide" : "show" ) ) { + + // Pretend to be hidden if this is a "show" and + // there is still data from a stopped show/hide + if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { + hidden = true; + + // Ignore all other no-op show/hide data + } else { + continue; + } + } + orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); + } + } + + // Bail out if this is a no-op like .hide().hide() + propTween = !jQuery.isEmptyObject( props ); + if ( !propTween && jQuery.isEmptyObject( orig ) ) { + return; + } + + // Restrict "overflow" and "display" styles during box animations + if ( isBox && elem.nodeType === 1 ) { + + // Support: IE <=9 - 11, Edge 12 - 15 + // Record all 3 overflow attributes because IE does not infer the shorthand + // from identically-valued overflowX and overflowY and Edge just mirrors + // the overflowX value there. 
+ opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; + + // Identify a display type, preferring old show/hide data over the CSS cascade + restoreDisplay = dataShow && dataShow.display; + if ( restoreDisplay == null ) { + restoreDisplay = dataPriv.get( elem, "display" ); + } + display = jQuery.css( elem, "display" ); + if ( display === "none" ) { + if ( restoreDisplay ) { + display = restoreDisplay; + } else { + + // Get nonempty value(s) by temporarily forcing visibility + showHide( [ elem ], true ); + restoreDisplay = elem.style.display || restoreDisplay; + display = jQuery.css( elem, "display" ); + showHide( [ elem ] ); + } + } + + // Animate inline elements as inline-block + if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { + if ( jQuery.css( elem, "float" ) === "none" ) { + + // Restore the original display value at the end of pure show/hide animations + if ( !propTween ) { + anim.done( function() { + style.display = restoreDisplay; + } ); + if ( restoreDisplay == null ) { + display = style.display; + restoreDisplay = display === "none" ? "" : display; + } + } + style.display = "inline-block"; + } + } + } + + if ( opts.overflow ) { + style.overflow = "hidden"; + anim.always( function() { + style.overflow = opts.overflow[ 0 ]; + style.overflowX = opts.overflow[ 1 ]; + style.overflowY = opts.overflow[ 2 ]; + } ); + } + + // Implement show/hide animations + propTween = false; + for ( prop in orig ) { + + // General show/hide setup for this element animation + if ( !propTween ) { + if ( dataShow ) { + if ( "hidden" in dataShow ) { + hidden = dataShow.hidden; + } + } else { + dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); + } + + // Store hidden/visible for toggle so `.stop().toggle()` "reverses" + if ( toggle ) { + dataShow.hidden = !hidden; + } + + // Show elements before animating them + if ( hidden ) { + showHide( [ elem ], true ); + } + + /* eslint-disable no-loop-func */ + + anim.done( function() { + + /* eslint-enable no-loop-func */ + + // The final step of a "hide" animation is actually hiding the element + if ( !hidden ) { + showHide( [ elem ] ); + } + dataPriv.remove( elem, "fxshow" ); + for ( prop in orig ) { + jQuery.style( elem, prop, orig[ prop ] ); + } + } ); + } + + // Per-property setup + propTween = createTween( hidden ? dataShow[ prop ] : 0, prop, anim ); + if ( !( prop in dataShow ) ) { + dataShow[ prop ] = propTween.start; + if ( hidden ) { + propTween.end = propTween.start; + propTween.start = 0; + } + } + } +} + +function propFilter( props, specialEasing ) { + var index, name, easing, value, hooks; + + // camelCase, specialEasing and expand cssHook pass + for ( index in props ) { + name = camelCase( index ); + easing = specialEasing[ name ]; + value = props[ index ]; + if ( Array.isArray( value ) ) { + easing = value[ 1 ]; + value = props[ index ] = value[ 0 ]; + } + + if ( index !== name ) { + props[ name ] = value; + delete props[ index ]; + } + + hooks = jQuery.cssHooks[ name ]; + if ( hooks && "expand" in hooks ) { + value = hooks.expand( value ); + delete props[ name ]; + + // Not quite $.extend, this won't overwrite existing keys. 
+ // Reusing 'index' because we have the correct "name" + for ( index in value ) { + if ( !( index in props ) ) { + props[ index ] = value[ index ]; + specialEasing[ index ] = easing; + } + } + } else { + specialEasing[ name ] = easing; + } + } +} + +function Animation( elem, properties, options ) { + var result, + stopped, + index = 0, + length = Animation.prefilters.length, + deferred = jQuery.Deferred().always( function() { + + // Don't match elem in the :animated selector + delete tick.elem; + } ), + tick = function() { + if ( stopped ) { + return false; + } + var currentTime = fxNow || createFxNow(), + remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), + + // Support: Android 2.3 only + // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) + temp = remaining / animation.duration || 0, + percent = 1 - temp, + index = 0, + length = animation.tweens.length; + + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( percent ); + } + + deferred.notifyWith( elem, [ animation, percent, remaining ] ); + + // If there's more to do, yield + if ( percent < 1 && length ) { + return remaining; + } + + // If this was an empty animation, synthesize a final progress notification + if ( !length ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + } + + // Resolve the animation and report its conclusion + deferred.resolveWith( elem, [ animation ] ); + return false; + }, + animation = deferred.promise( { + elem: elem, + props: jQuery.extend( {}, properties ), + opts: jQuery.extend( true, { + specialEasing: {}, + easing: jQuery.easing._default + }, options ), + originalProperties: properties, + originalOptions: options, + startTime: fxNow || createFxNow(), + duration: options.duration, + tweens: [], + createTween: function( prop, end ) { + var tween = jQuery.Tween( elem, animation.opts, prop, end, + animation.opts.specialEasing[ prop ] || animation.opts.easing ); + animation.tweens.push( tween ); + return tween; + }, + stop: function( gotoEnd ) { + var index = 0, + + // If we are going to the end, we want to run all the tweens + // otherwise we skip this part + length = gotoEnd ? 
animation.tweens.length : 0; + if ( stopped ) { + return this; + } + stopped = true; + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( 1 ); + } + + // Resolve when we played the last frame; otherwise, reject + if ( gotoEnd ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + deferred.resolveWith( elem, [ animation, gotoEnd ] ); + } else { + deferred.rejectWith( elem, [ animation, gotoEnd ] ); + } + return this; + } + } ), + props = animation.props; + + propFilter( props, animation.opts.specialEasing ); + + for ( ; index < length; index++ ) { + result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); + if ( result ) { + if ( isFunction( result.stop ) ) { + jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = + result.stop.bind( result ); + } + return result; + } + } + + jQuery.map( props, createTween, animation ); + + if ( isFunction( animation.opts.start ) ) { + animation.opts.start.call( elem, animation ); + } + + // Attach callbacks from options + animation + .progress( animation.opts.progress ) + .done( animation.opts.done, animation.opts.complete ) + .fail( animation.opts.fail ) + .always( animation.opts.always ); + + jQuery.fx.timer( + jQuery.extend( tick, { + elem: elem, + anim: animation, + queue: animation.opts.queue + } ) + ); + + return animation; +} + +jQuery.Animation = jQuery.extend( Animation, { + + tweeners: { + "*": [ function( prop, value ) { + var tween = this.createTween( prop, value ); + adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); + return tween; + } ] + }, + + tweener: function( props, callback ) { + if ( isFunction( props ) ) { + callback = props; + props = [ "*" ]; + } else { + props = props.match( rnothtmlwhite ); + } + + var prop, + index = 0, + length = props.length; + + for ( ; index < length; index++ ) { + prop = props[ index ]; + Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; + Animation.tweeners[ prop ].unshift( callback ); + } + }, + + prefilters: [ defaultPrefilter ], + + prefilter: function( callback, prepend ) { + if ( prepend ) { + Animation.prefilters.unshift( callback ); + } else { + Animation.prefilters.push( callback ); + } + } +} ); + +jQuery.speed = function( speed, easing, fn ) { + var opt = speed && typeof speed === "object" ? 
jQuery.extend( {}, speed ) : { + complete: fn || !fn && easing || + isFunction( speed ) && speed, + duration: speed, + easing: fn && easing || easing && !isFunction( easing ) && easing + }; + + // Go to the end state if fx are off + if ( jQuery.fx.off ) { + opt.duration = 0; + + } else { + if ( typeof opt.duration !== "number" ) { + if ( opt.duration in jQuery.fx.speeds ) { + opt.duration = jQuery.fx.speeds[ opt.duration ]; + + } else { + opt.duration = jQuery.fx.speeds._default; + } + } + } + + // Normalize opt.queue - true/undefined/null -> "fx" + if ( opt.queue == null || opt.queue === true ) { + opt.queue = "fx"; + } + + // Queueing + opt.old = opt.complete; + + opt.complete = function() { + if ( isFunction( opt.old ) ) { + opt.old.call( this ); + } + + if ( opt.queue ) { + jQuery.dequeue( this, opt.queue ); + } + }; + + return opt; +}; + +jQuery.fn.extend( { + fadeTo: function( speed, to, easing, callback ) { + + // Show any hidden elements after setting opacity to 0 + return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() + + // Animate to the value specified + .end().animate( { opacity: to }, speed, easing, callback ); + }, + animate: function( prop, speed, easing, callback ) { + var empty = jQuery.isEmptyObject( prop ), + optall = jQuery.speed( speed, easing, callback ), + doAnimation = function() { + + // Operate on a copy of prop so per-property easing won't be lost + var anim = Animation( this, jQuery.extend( {}, prop ), optall ); + + // Empty animations, or finishing resolves immediately + if ( empty || dataPriv.get( this, "finish" ) ) { + anim.stop( true ); + } + }; + doAnimation.finish = doAnimation; + + return empty || optall.queue === false ? + this.each( doAnimation ) : + this.queue( optall.queue, doAnimation ); + }, + stop: function( type, clearQueue, gotoEnd ) { + var stopQueue = function( hooks ) { + var stop = hooks.stop; + delete hooks.stop; + stop( gotoEnd ); + }; + + if ( typeof type !== "string" ) { + gotoEnd = clearQueue; + clearQueue = type; + type = undefined; + } + if ( clearQueue && type !== false ) { + this.queue( type || "fx", [] ); + } + + return this.each( function() { + var dequeue = true, + index = type != null && type + "queueHooks", + timers = jQuery.timers, + data = dataPriv.get( this ); + + if ( index ) { + if ( data[ index ] && data[ index ].stop ) { + stopQueue( data[ index ] ); + } + } else { + for ( index in data ) { + if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { + stopQueue( data[ index ] ); + } + } + } + + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && + ( type == null || timers[ index ].queue === type ) ) { + + timers[ index ].anim.stop( gotoEnd ); + dequeue = false; + timers.splice( index, 1 ); + } + } + + // Start the next in the queue if the last step wasn't forced. + // Timers currently will call their complete callbacks, which + // will dequeue but only if they were gotoEnd. + if ( dequeue || !gotoEnd ) { + jQuery.dequeue( this, type ); + } + } ); + }, + finish: function( type ) { + if ( type !== false ) { + type = type || "fx"; + } + return this.each( function() { + var index, + data = dataPriv.get( this ), + queue = data[ type + "queue" ], + hooks = data[ type + "queueHooks" ], + timers = jQuery.timers, + length = queue ? 
queue.length : 0; + + // Enable finishing flag on private data + data.finish = true; + + // Empty the queue first + jQuery.queue( this, type, [] ); + + if ( hooks && hooks.stop ) { + hooks.stop.call( this, true ); + } + + // Look for any active animations, and finish them + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && timers[ index ].queue === type ) { + timers[ index ].anim.stop( true ); + timers.splice( index, 1 ); + } + } + + // Look for any animations in the old queue and finish them + for ( index = 0; index < length; index++ ) { + if ( queue[ index ] && queue[ index ].finish ) { + queue[ index ].finish.call( this ); + } + } + + // Turn off finishing flag + delete data.finish; + } ); + } +} ); + +jQuery.each( [ "toggle", "show", "hide" ], function( i, name ) { + var cssFn = jQuery.fn[ name ]; + jQuery.fn[ name ] = function( speed, easing, callback ) { + return speed == null || typeof speed === "boolean" ? + cssFn.apply( this, arguments ) : + this.animate( genFx( name, true ), speed, easing, callback ); + }; +} ); + +// Generate shortcuts for custom animations +jQuery.each( { + slideDown: genFx( "show" ), + slideUp: genFx( "hide" ), + slideToggle: genFx( "toggle" ), + fadeIn: { opacity: "show" }, + fadeOut: { opacity: "hide" }, + fadeToggle: { opacity: "toggle" } +}, function( name, props ) { + jQuery.fn[ name ] = function( speed, easing, callback ) { + return this.animate( props, speed, easing, callback ); + }; +} ); + +jQuery.timers = []; +jQuery.fx.tick = function() { + var timer, + i = 0, + timers = jQuery.timers; + + fxNow = Date.now(); + + for ( ; i < timers.length; i++ ) { + timer = timers[ i ]; + + // Run the timer and safely remove it when done (allowing for external removal) + if ( !timer() && timers[ i ] === timer ) { + timers.splice( i--, 1 ); + } + } + + if ( !timers.length ) { + jQuery.fx.stop(); + } + fxNow = undefined; +}; + +jQuery.fx.timer = function( timer ) { + jQuery.timers.push( timer ); + jQuery.fx.start(); +}; + +jQuery.fx.interval = 13; +jQuery.fx.start = function() { + if ( inProgress ) { + return; + } + + inProgress = true; + schedule(); +}; + +jQuery.fx.stop = function() { + inProgress = null; +}; + +jQuery.fx.speeds = { + slow: 600, + fast: 200, + + // Default speed + _default: 400 +}; + + +// Based off of the plugin by Clint Helfers, with permission. +// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ +jQuery.fn.delay = function( time, type ) { + time = jQuery.fx ? 
jQuery.fx.speeds[ time ] || time : time; + type = type || "fx"; + + return this.queue( type, function( next, hooks ) { + var timeout = window.setTimeout( next, time ); + hooks.stop = function() { + window.clearTimeout( timeout ); + }; + } ); +}; + + +( function() { + var input = document.createElement( "input" ), + select = document.createElement( "select" ), + opt = select.appendChild( document.createElement( "option" ) ); + + input.type = "checkbox"; + + // Support: Android <=4.3 only + // Default value for a checkbox should be "on" + support.checkOn = input.value !== ""; + + // Support: IE <=11 only + // Must access selectedIndex to make default options select + support.optSelected = opt.selected; + + // Support: IE <=11 only + // An input loses its value after becoming a radio + input = document.createElement( "input" ); + input.value = "t"; + input.type = "radio"; + support.radioValue = input.value === "t"; +} )(); + + +var boolHook, + attrHandle = jQuery.expr.attrHandle; + +jQuery.fn.extend( { + attr: function( name, value ) { + return access( this, jQuery.attr, name, value, arguments.length > 1 ); + }, + + removeAttr: function( name ) { + return this.each( function() { + jQuery.removeAttr( this, name ); + } ); + } +} ); + +jQuery.extend( { + attr: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set attributes on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || nType === 2 ) { + return; + } + + // Fallback to prop when attributes are not supported + if ( typeof elem.getAttribute === "undefined" ) { + return jQuery.prop( elem, name, value ); + } + + // Attribute hooks are determined by the lowercase version + // Grab necessary hook if one is defined + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + hooks = jQuery.attrHooks[ name.toLowerCase() ] || + ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); + } + + if ( value !== undefined ) { + if ( value === null ) { + jQuery.removeAttr( elem, name ); + return; + } + + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + elem.setAttribute( name, value + "" ); + return value; + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + ret = jQuery.find.attr( elem, name ); + + // Non-existent attributes return null, we normalize to undefined + return ret == null ? 
undefined : ret; + }, + + attrHooks: { + type: { + set: function( elem, value ) { + if ( !support.radioValue && value === "radio" && + nodeName( elem, "input" ) ) { + var val = elem.value; + elem.setAttribute( "type", value ); + if ( val ) { + elem.value = val; + } + return value; + } + } + } + }, + + removeAttr: function( elem, value ) { + var name, + i = 0, + + // Attribute names can contain non-HTML whitespace characters + // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 + attrNames = value && value.match( rnothtmlwhite ); + + if ( attrNames && elem.nodeType === 1 ) { + while ( ( name = attrNames[ i++ ] ) ) { + elem.removeAttribute( name ); + } + } + } +} ); + +// Hooks for boolean attributes +boolHook = { + set: function( elem, value, name ) { + if ( value === false ) { + + // Remove boolean attributes when set to false + jQuery.removeAttr( elem, name ); + } else { + elem.setAttribute( name, name ); + } + return name; + } +}; + +jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( i, name ) { + var getter = attrHandle[ name ] || jQuery.find.attr; + + attrHandle[ name ] = function( elem, name, isXML ) { + var ret, handle, + lowercaseName = name.toLowerCase(); + + if ( !isXML ) { + + // Avoid an infinite loop by temporarily removing this function from the getter + handle = attrHandle[ lowercaseName ]; + attrHandle[ lowercaseName ] = ret; + ret = getter( elem, name, isXML ) != null ? + lowercaseName : + null; + attrHandle[ lowercaseName ] = handle; + } + return ret; + }; +} ); + + + + +var rfocusable = /^(?:input|select|textarea|button)$/i, + rclickable = /^(?:a|area)$/i; + +jQuery.fn.extend( { + prop: function( name, value ) { + return access( this, jQuery.prop, name, value, arguments.length > 1 ); + }, + + removeProp: function( name ) { + return this.each( function() { + delete this[ jQuery.propFix[ name ] || name ]; + } ); + } +} ); + +jQuery.extend( { + prop: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set properties on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || nType === 2 ) { + return; + } + + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + + // Fix name and attach hooks + name = jQuery.propFix[ name ] || name; + hooks = jQuery.propHooks[ name ]; + } + + if ( value !== undefined ) { + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + return ( elem[ name ] = value ); + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + return elem[ name ]; + }, + + propHooks: { + tabIndex: { + get: function( elem ) { + + // Support: IE <=9 - 11 only + // elem.tabIndex doesn't always return the + // correct value when it hasn't been explicitly set + // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ + // Use proper attribute retrieval(#12072) + var tabindex = jQuery.find.attr( elem, "tabindex" ); + + if ( tabindex ) { + return parseInt( tabindex, 10 ); + } + + if ( + rfocusable.test( elem.nodeName ) || + rclickable.test( elem.nodeName ) && + elem.href + ) { + return 0; + } + + return -1; + } + } + }, + + propFix: { + "for": "htmlFor", + "class": "className" + } +} ); + +// Support: IE <=11 only +// Accessing the selectedIndex property +// forces the browser to respect setting selected +// on the option +// The getter ensures a default option is selected +// when in an 
optgroup +// eslint rule "no-unused-expressions" is disabled for this code +// since it considers such accessions noop +if ( !support.optSelected ) { + jQuery.propHooks.selected = { + get: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent && parent.parentNode ) { + parent.parentNode.selectedIndex; + } + return null; + }, + set: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent ) { + parent.selectedIndex; + + if ( parent.parentNode ) { + parent.parentNode.selectedIndex; + } + } + } + }; +} + +jQuery.each( [ + "tabIndex", + "readOnly", + "maxLength", + "cellSpacing", + "cellPadding", + "rowSpan", + "colSpan", + "useMap", + "frameBorder", + "contentEditable" +], function() { + jQuery.propFix[ this.toLowerCase() ] = this; +} ); + + + + + // Strip and collapse whitespace according to HTML spec + // https://infra.spec.whatwg.org/#strip-and-collapse-ascii-whitespace + function stripAndCollapse( value ) { + var tokens = value.match( rnothtmlwhite ) || []; + return tokens.join( " " ); + } + + +function getClass( elem ) { + return elem.getAttribute && elem.getAttribute( "class" ) || ""; +} + +function classesToArray( value ) { + if ( Array.isArray( value ) ) { + return value; + } + if ( typeof value === "string" ) { + return value.match( rnothtmlwhite ) || []; + } + return []; +} + +jQuery.fn.extend( { + addClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( isFunction( value ) ) { + return this.each( function( j ) { + jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + classes = classesToArray( value ); + + if ( classes.length ) { + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); + + if ( cur ) { + j = 0; + while ( ( clazz = classes[ j++ ] ) ) { + if ( cur.indexOf( " " + clazz + " " ) < 0 ) { + cur += clazz + " "; + } + } + + // Only assign if different to avoid unneeded rendering. + finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } + } + } + } + + return this; + }, + + removeClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( isFunction( value ) ) { + return this.each( function( j ) { + jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + if ( !arguments.length ) { + return this.attr( "class", "" ); + } + + classes = classesToArray( value ); + + if ( classes.length ) { + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + + // This expression is here for better compressibility (see addClass) + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); + + if ( cur ) { + j = 0; + while ( ( clazz = classes[ j++ ] ) ) { + + // Remove *all* instances + while ( cur.indexOf( " " + clazz + " " ) > -1 ) { + cur = cur.replace( " " + clazz + " ", " " ); + } + } + + // Only assign if different to avoid unneeded rendering. + finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } + } + } + } + + return this; + }, + + toggleClass: function( value, stateVal ) { + var type = typeof value, + isValidValue = type === "string" || Array.isArray( value ); + + if ( typeof stateVal === "boolean" && isValidValue ) { + return stateVal ? 
this.addClass( value ) : this.removeClass( value ); + } + + if ( isFunction( value ) ) { + return this.each( function( i ) { + jQuery( this ).toggleClass( + value.call( this, i, getClass( this ), stateVal ), + stateVal + ); + } ); + } + + return this.each( function() { + var className, i, self, classNames; + + if ( isValidValue ) { + + // Toggle individual class names + i = 0; + self = jQuery( this ); + classNames = classesToArray( value ); + + while ( ( className = classNames[ i++ ] ) ) { + + // Check each className given, space separated list + if ( self.hasClass( className ) ) { + self.removeClass( className ); + } else { + self.addClass( className ); + } + } + + // Toggle whole class name + } else if ( value === undefined || type === "boolean" ) { + className = getClass( this ); + if ( className ) { + + // Store className if set + dataPriv.set( this, "__className__", className ); + } + + // If the element has a class name or if we're passed `false`, + // then remove the whole classname (if there was one, the above saved it). + // Otherwise bring back whatever was previously saved (if anything), + // falling back to the empty string if nothing was stored. + if ( this.setAttribute ) { + this.setAttribute( "class", + className || value === false ? + "" : + dataPriv.get( this, "__className__" ) || "" + ); + } + } + } ); + }, + + hasClass: function( selector ) { + var className, elem, + i = 0; + + className = " " + selector + " "; + while ( ( elem = this[ i++ ] ) ) { + if ( elem.nodeType === 1 && + ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { + return true; + } + } + + return false; + } +} ); + + + + +var rreturn = /\r/g; + +jQuery.fn.extend( { + val: function( value ) { + var hooks, ret, valueIsFunction, + elem = this[ 0 ]; + + if ( !arguments.length ) { + if ( elem ) { + hooks = jQuery.valHooks[ elem.type ] || + jQuery.valHooks[ elem.nodeName.toLowerCase() ]; + + if ( hooks && + "get" in hooks && + ( ret = hooks.get( elem, "value" ) ) !== undefined + ) { + return ret; + } + + ret = elem.value; + + // Handle most common string cases + if ( typeof ret === "string" ) { + return ret.replace( rreturn, "" ); + } + + // Handle cases where value is null/undef or number + return ret == null ? "" : ret; + } + + return; + } + + valueIsFunction = isFunction( value ); + + return this.each( function( i ) { + var val; + + if ( this.nodeType !== 1 ) { + return; + } + + if ( valueIsFunction ) { + val = value.call( this, i, jQuery( this ).val() ); + } else { + val = value; + } + + // Treat null/undefined as ""; convert numbers to string + if ( val == null ) { + val = ""; + + } else if ( typeof val === "number" ) { + val += ""; + + } else if ( Array.isArray( val ) ) { + val = jQuery.map( val, function( value ) { + return value == null ? "" : value + ""; + } ); + } + + hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; + + // If set returns undefined, fall back to normal setting + if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { + this.value = val; + } + } ); + } +} ); + +jQuery.extend( { + valHooks: { + option: { + get: function( elem ) { + + var val = jQuery.find.attr( elem, "value" ); + return val != null ? 
+ val : + + // Support: IE <=10 - 11 only + // option.text throws exceptions (#14686, #14858) + // Strip and collapse whitespace + // https://html.spec.whatwg.org/#strip-and-collapse-whitespace + stripAndCollapse( jQuery.text( elem ) ); + } + }, + select: { + get: function( elem ) { + var value, option, i, + options = elem.options, + index = elem.selectedIndex, + one = elem.type === "select-one", + values = one ? null : [], + max = one ? index + 1 : options.length; + + if ( index < 0 ) { + i = max; + + } else { + i = one ? index : 0; + } + + // Loop through all the selected options + for ( ; i < max; i++ ) { + option = options[ i ]; + + // Support: IE <=9 only + // IE8-9 doesn't update selected after form reset (#2551) + if ( ( option.selected || i === index ) && + + // Don't return options that are disabled or in a disabled optgroup + !option.disabled && + ( !option.parentNode.disabled || + !nodeName( option.parentNode, "optgroup" ) ) ) { + + // Get the specific value for the option + value = jQuery( option ).val(); + + // We don't need an array for one selects + if ( one ) { + return value; + } + + // Multi-Selects return an array + values.push( value ); + } + } + + return values; + }, + + set: function( elem, value ) { + var optionSet, option, + options = elem.options, + values = jQuery.makeArray( value ), + i = options.length; + + while ( i-- ) { + option = options[ i ]; + + /* eslint-disable no-cond-assign */ + + if ( option.selected = + jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 + ) { + optionSet = true; + } + + /* eslint-enable no-cond-assign */ + } + + // Force browsers to behave consistently when non-matching value is set + if ( !optionSet ) { + elem.selectedIndex = -1; + } + return values; + } + } + } +} ); + +// Radios and checkboxes getter/setter +jQuery.each( [ "radio", "checkbox" ], function() { + jQuery.valHooks[ this ] = { + set: function( elem, value ) { + if ( Array.isArray( value ) ) { + return ( elem.checked = jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); + } + } + }; + if ( !support.checkOn ) { + jQuery.valHooks[ this ].get = function( elem ) { + return elem.getAttribute( "value" ) === null ? "on" : elem.value; + }; + } +} ); + + + + +// Return jQuery for attributes-only inclusion + + +support.focusin = "onfocusin" in window; + + +var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/, + stopPropagationCallback = function( e ) { + e.stopPropagation(); + }; + +jQuery.extend( jQuery.event, { + + trigger: function( event, data, elem, onlyHandlers ) { + + var i, cur, tmp, bubbleType, ontype, handle, special, lastElement, + eventPath = [ elem || document ], + type = hasOwn.call( event, "type" ) ? event.type : event, + namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; + + cur = lastElement = tmp = elem = elem || document; + + // Don't do events on text and comment nodes + if ( elem.nodeType === 3 || elem.nodeType === 8 ) { + return; + } + + // focus/blur morphs to focusin/out; ensure we're not firing them right now + if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { + return; + } + + if ( type.indexOf( "." ) > -1 ) { + + // Namespaced trigger; create a regexp to match event type in handle() + namespaces = type.split( "." ); + type = namespaces.shift(); + namespaces.sort(); + } + ontype = type.indexOf( ":" ) < 0 && "on" + type; + + // Caller can pass in a jQuery.Event object, Object, or just an event type string + event = event[ jQuery.expando ] ? 
+ event : + new jQuery.Event( type, typeof event === "object" && event ); + + // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) + event.isTrigger = onlyHandlers ? 2 : 3; + event.namespace = namespaces.join( "." ); + event.rnamespace = event.namespace ? + new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : + null; + + // Clean up the event in case it is being reused + event.result = undefined; + if ( !event.target ) { + event.target = elem; + } + + // Clone any incoming data and prepend the event, creating the handler arg list + data = data == null ? + [ event ] : + jQuery.makeArray( data, [ event ] ); + + // Allow special events to draw outside the lines + special = jQuery.event.special[ type ] || {}; + if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { + return; + } + + // Determine event propagation path in advance, per W3C events spec (#9951) + // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) + if ( !onlyHandlers && !special.noBubble && !isWindow( elem ) ) { + + bubbleType = special.delegateType || type; + if ( !rfocusMorph.test( bubbleType + type ) ) { + cur = cur.parentNode; + } + for ( ; cur; cur = cur.parentNode ) { + eventPath.push( cur ); + tmp = cur; + } + + // Only add window if we got to document (e.g., not plain obj or detached DOM) + if ( tmp === ( elem.ownerDocument || document ) ) { + eventPath.push( tmp.defaultView || tmp.parentWindow || window ); + } + } + + // Fire handlers on the event path + i = 0; + while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { + lastElement = cur; + event.type = i > 1 ? + bubbleType : + special.bindType || type; + + // jQuery handler + handle = ( dataPriv.get( cur, "events" ) || {} )[ event.type ] && + dataPriv.get( cur, "handle" ); + if ( handle ) { + handle.apply( cur, data ); + } + + // Native handler + handle = ontype && cur[ ontype ]; + if ( handle && handle.apply && acceptData( cur ) ) { + event.result = handle.apply( cur, data ); + if ( event.result === false ) { + event.preventDefault(); + } + } + } + event.type = type; + + // If nobody prevented the default action, do it now + if ( !onlyHandlers && !event.isDefaultPrevented() ) { + + if ( ( !special._default || + special._default.apply( eventPath.pop(), data ) === false ) && + acceptData( elem ) ) { + + // Call a native DOM method on the target with the same name as the event. 
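+ // (For example, jQuery( form ).trigger( "submit" ) invokes the native
+ // form.submit() here unless a handler called preventDefault.)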
+ // Don't do default actions on window, that's where global variables be (#6170) + if ( ontype && isFunction( elem[ type ] ) && !isWindow( elem ) ) { + + // Don't re-trigger an onFOO event when we call its FOO() method + tmp = elem[ ontype ]; + + if ( tmp ) { + elem[ ontype ] = null; + } + + // Prevent re-triggering of the same event, since we already bubbled it above + jQuery.event.triggered = type; + + if ( event.isPropagationStopped() ) { + lastElement.addEventListener( type, stopPropagationCallback ); + } + + elem[ type ](); + + if ( event.isPropagationStopped() ) { + lastElement.removeEventListener( type, stopPropagationCallback ); + } + + jQuery.event.triggered = undefined; + + if ( tmp ) { + elem[ ontype ] = tmp; + } + } + } + } + + return event.result; + }, + + // Piggyback on a donor event to simulate a different one + // Used only for `focus(in | out)` events + simulate: function( type, elem, event ) { + var e = jQuery.extend( + new jQuery.Event(), + event, + { + type: type, + isSimulated: true + } + ); + + jQuery.event.trigger( e, null, elem ); + } + +} ); + +jQuery.fn.extend( { + + trigger: function( type, data ) { + return this.each( function() { + jQuery.event.trigger( type, data, this ); + } ); + }, + triggerHandler: function( type, data ) { + var elem = this[ 0 ]; + if ( elem ) { + return jQuery.event.trigger( type, data, elem, true ); + } + } +} ); + + +// Support: Firefox <=44 +// Firefox doesn't have focus(in | out) events +// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 +// +// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 +// focus(in | out) events fire after focus & blur events, +// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order +// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 +if ( !support.focusin ) { + jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { + + // Attach a single capturing handler on the document while someone wants focusin/focusout + var handler = function( event ) { + jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); + }; + + jQuery.event.special[ fix ] = { + setup: function() { + var doc = this.ownerDocument || this, + attaches = dataPriv.access( doc, fix ); + + if ( !attaches ) { + doc.addEventListener( orig, handler, true ); + } + dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); + }, + teardown: function() { + var doc = this.ownerDocument || this, + attaches = dataPriv.access( doc, fix ) - 1; + + if ( !attaches ) { + doc.removeEventListener( orig, handler, true ); + dataPriv.remove( doc, fix ); + + } else { + dataPriv.access( doc, fix, attaches ); + } + } + }; + } ); +} +var location = window.location; + +var nonce = Date.now(); + +var rquery = ( /\?/ ); + + + +// Cross-browser xml parsing +jQuery.parseXML = function( data ) { + var xml; + if ( !data || typeof data !== "string" ) { + return null; + } + + // Support: IE 9 - 11 only + // IE throws on parseFromString with invalid input. 
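+ // Other browsers return a document containing a <parsererror> node
+ // instead; the check below turns both cases into the same
+ // "Invalid XML" jQuery.error.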
+ try { + xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); + } catch ( e ) { + xml = undefined; + } + + if ( !xml || xml.getElementsByTagName( "parsererror" ).length ) { + jQuery.error( "Invalid XML: " + data ); + } + return xml; +}; + + +var + rbracket = /\[\]$/, + rCRLF = /\r?\n/g, + rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, + rsubmittable = /^(?:input|select|textarea|keygen)/i; + +function buildParams( prefix, obj, traditional, add ) { + var name; + + if ( Array.isArray( obj ) ) { + + // Serialize array item. + jQuery.each( obj, function( i, v ) { + if ( traditional || rbracket.test( prefix ) ) { + + // Treat each array item as a scalar. + add( prefix, v ); + + } else { + + // Item is non-scalar (array or object), encode its numeric index. + buildParams( + prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]", + v, + traditional, + add + ); + } + } ); + + } else if ( !traditional && toType( obj ) === "object" ) { + + // Serialize object item. + for ( name in obj ) { + buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); + } + + } else { + + // Serialize scalar item. + add( prefix, obj ); + } +} + +// Serialize an array of form elements or a set of +// key/values into a query string +jQuery.param = function( a, traditional ) { + var prefix, + s = [], + add = function( key, valueOrFunction ) { + + // If value is a function, invoke it and use its return value + var value = isFunction( valueOrFunction ) ? + valueOrFunction() : + valueOrFunction; + + s[ s.length ] = encodeURIComponent( key ) + "=" + + encodeURIComponent( value == null ? "" : value ); + }; + + if ( a == null ) { + return ""; + } + + // If an array was passed in, assume that it is an array of form elements. + if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { + + // Serialize the form elements + jQuery.each( a, function() { + add( this.name, this.value ); + } ); + + } else { + + // If traditional, encode the "old" way (the way 1.3.2 or older + // did it), otherwise encode params recursively. + for ( prefix in a ) { + buildParams( prefix, a[ prefix ], traditional, add ); + } + } + + // Return the resulting serialization + return s.join( "&" ); +}; + +jQuery.fn.extend( { + serialize: function() { + return jQuery.param( this.serializeArray() ); + }, + serializeArray: function() { + return this.map( function() { + + // Can add propHook for "elements" to filter or add form elements + var elements = jQuery.prop( this, "elements" ); + return elements ? 
jQuery.makeArray( elements ) : this; + } ) + .filter( function() { + var type = this.type; + + // Use .is( ":disabled" ) so that fieldset[disabled] works + return this.name && !jQuery( this ).is( ":disabled" ) && + rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && + ( this.checked || !rcheckableType.test( type ) ); + } ) + .map( function( i, elem ) { + var val = jQuery( this ).val(); + + if ( val == null ) { + return null; + } + + if ( Array.isArray( val ) ) { + return jQuery.map( val, function( val ) { + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + } ); + } + + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + } ).get(); + } +} ); + + +var + r20 = /%20/g, + rhash = /#.*$/, + rantiCache = /([?&])_=[^&]*/, + rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, + + // #7653, #8125, #8152: local protocol detection + rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, + rnoContent = /^(?:GET|HEAD)$/, + rprotocol = /^\/\//, + + /* Prefilters + * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) + * 2) These are called: + * - BEFORE asking for a transport + * - AFTER param serialization (s.data is a string if s.processData is true) + * 3) key is the dataType + * 4) the catchall symbol "*" can be used + * 5) execution will start with transport dataType and THEN continue down to "*" if needed + */ + prefilters = {}, + + /* Transports bindings + * 1) key is the dataType + * 2) the catchall symbol "*" can be used + * 3) selection will start with transport dataType and THEN go to "*" if needed + */ + transports = {}, + + // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression + allTypes = "*/".concat( "*" ), + + // Anchor tag for parsing the document origin + originAnchor = document.createElement( "a" ); + originAnchor.href = location.href; + +// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport +function addToPrefiltersOrTransports( structure ) { + + // dataTypeExpression is optional and defaults to "*" + return function( dataTypeExpression, func ) { + + if ( typeof dataTypeExpression !== "string" ) { + func = dataTypeExpression; + dataTypeExpression = "*"; + } + + var dataType, + i = 0, + dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; + + if ( isFunction( func ) ) { + + // For each dataType in the dataTypeExpression + while ( ( dataType = dataTypes[ i++ ] ) ) { + + // Prepend if requested + if ( dataType[ 0 ] === "+" ) { + dataType = dataType.slice( 1 ) || "*"; + ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); + + // Otherwise append + } else { + ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); + } + } + } + }; +} + +// Base inspection function for prefilters and transports +function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { + + var inspected = {}, + seekingTransport = ( structure === transports ); + + function inspect( dataType ) { + var selected; + inspected[ dataType ] = true; + jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { + var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR ); + if ( typeof dataTypeOrTransport === "string" && + !seekingTransport && !inspected[ dataTypeOrTransport ] ) { + + options.dataTypes.unshift( dataTypeOrTransport ); + inspect( dataTypeOrTransport ); + return false; + } else if ( seekingTransport ) { + return !( selected = dataTypeOrTransport ); + } 
+ } ); + return selected; + } + + return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); +} + +// A special extend for ajax options +// that takes "flat" options (not to be deep extended) +// Fixes #9887 +function ajaxExtend( target, src ) { + var key, deep, + flatOptions = jQuery.ajaxSettings.flatOptions || {}; + + for ( key in src ) { + if ( src[ key ] !== undefined ) { + ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; + } + } + if ( deep ) { + jQuery.extend( true, target, deep ); + } + + return target; +} + +/* Handles responses to an ajax request: + * - finds the right dataType (mediates between content-type and expected dataType) + * - returns the corresponding response + */ +function ajaxHandleResponses( s, jqXHR, responses ) { + + var ct, type, finalDataType, firstDataType, + contents = s.contents, + dataTypes = s.dataTypes; + + // Remove auto dataType and get content-type in the process + while ( dataTypes[ 0 ] === "*" ) { + dataTypes.shift(); + if ( ct === undefined ) { + ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); + } + } + + // Check if we're dealing with a known content-type + if ( ct ) { + for ( type in contents ) { + if ( contents[ type ] && contents[ type ].test( ct ) ) { + dataTypes.unshift( type ); + break; + } + } + } + + // Check to see if we have a response for the expected dataType + if ( dataTypes[ 0 ] in responses ) { + finalDataType = dataTypes[ 0 ]; + } else { + + // Try convertible dataTypes + for ( type in responses ) { + if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { + finalDataType = type; + break; + } + if ( !firstDataType ) { + firstDataType = type; + } + } + + // Or just use first one + finalDataType = finalDataType || firstDataType; + } + + // If we found a dataType + // We add the dataType to the list if needed + // and return the corresponding response + if ( finalDataType ) { + if ( finalDataType !== dataTypes[ 0 ] ) { + dataTypes.unshift( finalDataType ); + } + return responses[ finalDataType ]; + } +} + +/* Chain conversions given the request and the original response + * Also sets the responseXXX fields on the jqXHR instance + */ +function ajaxConvert( s, response, jqXHR, isSuccess ) { + var conv2, current, conv, tmp, prev, + converters = {}, + + // Work with a copy of dataTypes in case we need to modify it for conversion + dataTypes = s.dataTypes.slice(); + + // Create converters map with lowercased keys + if ( dataTypes[ 1 ] ) { + for ( conv in s.converters ) { + converters[ conv.toLowerCase() ] = s.converters[ conv ]; + } + } + + current = dataTypes.shift(); + + // Convert to each sequential dataType + while ( current ) { + + if ( s.responseFields[ current ] ) { + jqXHR[ s.responseFields[ current ] ] = response; + } + + // Apply the dataFilter if provided + if ( !prev && isSuccess && s.dataFilter ) { + response = s.dataFilter( response, s.dataType ); + } + + prev = current; + current = dataTypes.shift(); + + if ( current ) { + + // There's only work to do if current dataType is non-auto + if ( current === "*" ) { + + current = prev; + + // Convert response if prev dataType is non-auto and differs from current + } else if ( prev !== "*" && prev !== current ) { + + // Seek a direct converter + conv = converters[ prev + " " + current ] || converters[ "* " + current ]; + + // If none found, seek a pair + if ( !conv ) { + for ( conv2 in converters ) { + + // If conv2 outputs current + tmp = conv2.split( " " ); + if ( tmp[ 1 ] === current ) { + + // If 
prev can be converted to accepted input + conv = converters[ prev + " " + tmp[ 0 ] ] || + converters[ "* " + tmp[ 0 ] ]; + if ( conv ) { + + // Condense equivalence converters + if ( conv === true ) { + conv = converters[ conv2 ]; + + // Otherwise, insert the intermediate dataType + } else if ( converters[ conv2 ] !== true ) { + current = tmp[ 0 ]; + dataTypes.unshift( tmp[ 1 ] ); + } + break; + } + } + } + } + + // Apply converter (if not an equivalence) + if ( conv !== true ) { + + // Unless errors are allowed to bubble, catch and return them + if ( conv && s.throws ) { + response = conv( response ); + } else { + try { + response = conv( response ); + } catch ( e ) { + return { + state: "parsererror", + error: conv ? e : "No conversion from " + prev + " to " + current + }; + } + } + } + } + } + } + + return { state: "success", data: response }; +} + +jQuery.extend( { + + // Counter for holding the number of active queries + active: 0, + + // Last-Modified header cache for next request + lastModified: {}, + etag: {}, + + ajaxSettings: { + url: location.href, + type: "GET", + isLocal: rlocalProtocol.test( location.protocol ), + global: true, + processData: true, + async: true, + contentType: "application/x-www-form-urlencoded; charset=UTF-8", + + /* + timeout: 0, + data: null, + dataType: null, + username: null, + password: null, + cache: null, + throws: false, + traditional: false, + headers: {}, + */ + + accepts: { + "*": allTypes, + text: "text/plain", + html: "text/html", + xml: "application/xml, text/xml", + json: "application/json, text/javascript" + }, + + contents: { + xml: /\bxml\b/, + html: /\bhtml/, + json: /\bjson\b/ + }, + + responseFields: { + xml: "responseXML", + text: "responseText", + json: "responseJSON" + }, + + // Data converters + // Keys separate source (or catchall "*") and destination types with a single space + converters: { + + // Convert anything to text + "* text": String, + + // Text to html (true = no transformation) + "text html": true, + + // Evaluate text as a json expression + "text json": JSON.parse, + + // Parse text as xml + "text xml": jQuery.parseXML + }, + + // For options that shouldn't be deep extended: + // you can add your own custom options here if + // and when you create one that shouldn't be + // deep extended (see ajaxExtend) + flatOptions: { + url: true, + context: true + } + }, + + // Creates a full fledged settings object into target + // with both ajaxSettings and settings fields. + // If target is omitted, writes into ajaxSettings. + ajaxSetup: function( target, settings ) { + return settings ? 
+ + // Building a settings object + ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : + + // Extending ajaxSettings + ajaxExtend( jQuery.ajaxSettings, target ); + }, + + ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), + ajaxTransport: addToPrefiltersOrTransports( transports ), + + // Main method + ajax: function( url, options ) { + + // If url is an object, simulate pre-1.5 signature + if ( typeof url === "object" ) { + options = url; + url = undefined; + } + + // Force options to be an object + options = options || {}; + + var transport, + + // URL without anti-cache param + cacheURL, + + // Response headers + responseHeadersString, + responseHeaders, + + // timeout handle + timeoutTimer, + + // Url cleanup var + urlAnchor, + + // Request state (becomes false upon send and true upon completion) + completed, + + // To know if global events are to be dispatched + fireGlobals, + + // Loop variable + i, + + // uncached part of the url + uncached, + + // Create the final options object + s = jQuery.ajaxSetup( {}, options ), + + // Callbacks context + callbackContext = s.context || s, + + // Context for global events is callbackContext if it is a DOM node or jQuery collection + globalEventContext = s.context && + ( callbackContext.nodeType || callbackContext.jquery ) ? + jQuery( callbackContext ) : + jQuery.event, + + // Deferreds + deferred = jQuery.Deferred(), + completeDeferred = jQuery.Callbacks( "once memory" ), + + // Status-dependent callbacks + statusCode = s.statusCode || {}, + + // Headers (they are sent all at once) + requestHeaders = {}, + requestHeadersNames = {}, + + // Default abort message + strAbort = "canceled", + + // Fake xhr + jqXHR = { + readyState: 0, + + // Builds headers hashtable if needed + getResponseHeader: function( key ) { + var match; + if ( completed ) { + if ( !responseHeaders ) { + responseHeaders = {}; + while ( ( match = rheaders.exec( responseHeadersString ) ) ) { + responseHeaders[ match[ 1 ].toLowerCase() + " " ] = + ( responseHeaders[ match[ 1 ].toLowerCase() + " " ] || [] ) + .concat( match[ 2 ] ); + } + } + match = responseHeaders[ key.toLowerCase() + " " ]; + } + return match == null ? null : match.join( ", " ); + }, + + // Raw string + getAllResponseHeaders: function() { + return completed ? 
responseHeadersString : null; + }, + + // Caches the header + setRequestHeader: function( name, value ) { + if ( completed == null ) { + name = requestHeadersNames[ name.toLowerCase() ] = + requestHeadersNames[ name.toLowerCase() ] || name; + requestHeaders[ name ] = value; + } + return this; + }, + + // Overrides response content-type header + overrideMimeType: function( type ) { + if ( completed == null ) { + s.mimeType = type; + } + return this; + }, + + // Status-dependent callbacks + statusCode: function( map ) { + var code; + if ( map ) { + if ( completed ) { + + // Execute the appropriate callbacks + jqXHR.always( map[ jqXHR.status ] ); + } else { + + // Lazy-add the new callbacks in a way that preserves old ones + for ( code in map ) { + statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; + } + } + } + return this; + }, + + // Cancel the request + abort: function( statusText ) { + var finalText = statusText || strAbort; + if ( transport ) { + transport.abort( finalText ); + } + done( 0, finalText ); + return this; + } + }; + + // Attach deferreds + deferred.promise( jqXHR ); + + // Add protocol if not provided (prefilters might expect it) + // Handle falsy url in the settings object (#10093: consistency with old signature) + // We also use the url parameter if available + s.url = ( ( url || s.url || location.href ) + "" ) + .replace( rprotocol, location.protocol + "//" ); + + // Alias method option to type as per ticket #12004 + s.type = options.method || options.type || s.method || s.type; + + // Extract dataTypes list + s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; + + // A cross-domain request is in order when the origin doesn't match the current origin. + if ( s.crossDomain == null ) { + urlAnchor = document.createElement( "a" ); + + // Support: IE <=8 - 11, Edge 12 - 15 + // IE throws exception on accessing the href property if url is malformed, + // e.g. 
http://example.com:80x/ + try { + urlAnchor.href = s.url; + + // Support: IE <=8 - 11 only + // Anchor's host property isn't correctly set when s.url is relative + urlAnchor.href = urlAnchor.href; + s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== + urlAnchor.protocol + "//" + urlAnchor.host; + } catch ( e ) { + + // If there is an error parsing the URL, assume it is crossDomain, + // it can be rejected by the transport if it is invalid + s.crossDomain = true; + } + } + + // Convert data if not already a string + if ( s.data && s.processData && typeof s.data !== "string" ) { + s.data = jQuery.param( s.data, s.traditional ); + } + + // Apply prefilters + inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); + + // If request was aborted inside a prefilter, stop there + if ( completed ) { + return jqXHR; + } + + // We can fire global events as of now if asked to + // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) + fireGlobals = jQuery.event && s.global; + + // Watch for a new set of requests + if ( fireGlobals && jQuery.active++ === 0 ) { + jQuery.event.trigger( "ajaxStart" ); + } + + // Uppercase the type + s.type = s.type.toUpperCase(); + + // Determine if request has content + s.hasContent = !rnoContent.test( s.type ); + + // Save the URL in case we're toying with the If-Modified-Since + // and/or If-None-Match header later on + // Remove hash to simplify url manipulation + cacheURL = s.url.replace( rhash, "" ); + + // More options handling for requests with no content + if ( !s.hasContent ) { + + // Remember the hash so we can put it back + uncached = s.url.slice( cacheURL.length ); + + // If data is available and should be processed, append data to url + if ( s.data && ( s.processData || typeof s.data === "string" ) ) { + cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; + + // #9682: remove data so that it's not used in an eventual retry + delete s.data; + } + + // Add or update anti-cache param if needed + if ( s.cache === false ) { + cacheURL = cacheURL.replace( rantiCache, "$1" ); + uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce++ ) + uncached; + } + + // Put hash and anti-cache on the URL that will be requested (gh-1732) + s.url = cacheURL + uncached; + + // Change '%20' to '+' if this is encoded form body content (gh-2658) + } else if ( s.data && s.processData && + ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { + s.data = s.data.replace( r20, "+" ); + } + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. + if ( s.ifModified ) { + if ( jQuery.lastModified[ cacheURL ] ) { + jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); + } + if ( jQuery.etag[ cacheURL ] ) { + jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); + } + } + + // Set the correct header, if data is being sent + if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { + jqXHR.setRequestHeader( "Content-Type", s.contentType ); + } + + // Set the Accepts header for the server, depending on the dataType + jqXHR.setRequestHeader( + "Accept", + s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? + s.accepts[ s.dataTypes[ 0 ] ] + + ( s.dataTypes[ 0 ] !== "*" ? 
", " + allTypes + "; q=0.01" : "" ) : + s.accepts[ "*" ] + ); + + // Check for headers option + for ( i in s.headers ) { + jqXHR.setRequestHeader( i, s.headers[ i ] ); + } + + // Allow custom headers/mimetypes and early abort + if ( s.beforeSend && + ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { + + // Abort if not done already and return + return jqXHR.abort(); + } + + // Aborting is no longer a cancellation + strAbort = "abort"; + + // Install callbacks on deferreds + completeDeferred.add( s.complete ); + jqXHR.done( s.success ); + jqXHR.fail( s.error ); + + // Get transport + transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); + + // If no transport, we auto-abort + if ( !transport ) { + done( -1, "No Transport" ); + } else { + jqXHR.readyState = 1; + + // Send global event + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); + } + + // If request was aborted inside ajaxSend, stop there + if ( completed ) { + return jqXHR; + } + + // Timeout + if ( s.async && s.timeout > 0 ) { + timeoutTimer = window.setTimeout( function() { + jqXHR.abort( "timeout" ); + }, s.timeout ); + } + + try { + completed = false; + transport.send( requestHeaders, done ); + } catch ( e ) { + + // Rethrow post-completion exceptions + if ( completed ) { + throw e; + } + + // Propagate others as results + done( -1, e ); + } + } + + // Callback for when everything is done + function done( status, nativeStatusText, responses, headers ) { + var isSuccess, success, error, response, modified, + statusText = nativeStatusText; + + // Ignore repeat invocations + if ( completed ) { + return; + } + + completed = true; + + // Clear timeout if it exists + if ( timeoutTimer ) { + window.clearTimeout( timeoutTimer ); + } + + // Dereference transport for early garbage collection + // (no matter how long the jqXHR object will be used) + transport = undefined; + + // Cache response headers + responseHeadersString = headers || ""; + + // Set readyState + jqXHR.readyState = status > 0 ? 4 : 0; + + // Determine if successful + isSuccess = status >= 200 && status < 300 || status === 304; + + // Get response data + if ( responses ) { + response = ajaxHandleResponses( s, jqXHR, responses ); + } + + // Convert no matter what (that way responseXXX fields are always set) + response = ajaxConvert( s, response, jqXHR, isSuccess ); + + // If successful, handle type chaining + if ( isSuccess ) { + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
+ if ( s.ifModified ) { + modified = jqXHR.getResponseHeader( "Last-Modified" ); + if ( modified ) { + jQuery.lastModified[ cacheURL ] = modified; + } + modified = jqXHR.getResponseHeader( "etag" ); + if ( modified ) { + jQuery.etag[ cacheURL ] = modified; + } + } + + // if no content + if ( status === 204 || s.type === "HEAD" ) { + statusText = "nocontent"; + + // if not modified + } else if ( status === 304 ) { + statusText = "notmodified"; + + // If we have data, let's convert it + } else { + statusText = response.state; + success = response.data; + error = response.error; + isSuccess = !error; + } + } else { + + // Extract error from statusText and normalize for non-aborts + error = statusText; + if ( status || !statusText ) { + statusText = "error"; + if ( status < 0 ) { + status = 0; + } + } + } + + // Set data for the fake xhr object + jqXHR.status = status; + jqXHR.statusText = ( nativeStatusText || statusText ) + ""; + + // Success/Error + if ( isSuccess ) { + deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); + } else { + deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); + } + + // Status-dependent callbacks + jqXHR.statusCode( statusCode ); + statusCode = undefined; + + if ( fireGlobals ) { + globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", + [ jqXHR, s, isSuccess ? success : error ] ); + } + + // Complete + completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); + + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); + + // Handle the global AJAX counter + if ( !( --jQuery.active ) ) { + jQuery.event.trigger( "ajaxStop" ); + } + } + } + + return jqXHR; + }, + + getJSON: function( url, data, callback ) { + return jQuery.get( url, data, callback, "json" ); + }, + + getScript: function( url, callback ) { + return jQuery.get( url, undefined, callback, "script" ); + } +} ); + +jQuery.each( [ "get", "post" ], function( i, method ) { + jQuery[ method ] = function( url, data, callback, type ) { + + // Shift arguments if data argument was omitted + if ( isFunction( data ) ) { + type = type || callback; + callback = data; + data = undefined; + } + + // The url can be an options object (which then must have .url) + return jQuery.ajax( jQuery.extend( { + url: url, + type: method, + dataType: type, + data: data, + success: callback + }, jQuery.isPlainObject( url ) && url ) ); + }; +} ); + + +jQuery._evalUrl = function( url, options ) { + return jQuery.ajax( { + url: url, + + // Make this explicit, since user can override this through ajaxSetup (#11264) + type: "GET", + dataType: "script", + cache: true, + async: false, + global: false, + + // Only evaluate the response if it is successful (gh-4126) + // dataFilter is not invoked for failure responses, so using it instead + // of the default converter is kludgy but it works. 
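+ // The no-op "text script" converter below suppresses the default
+ // converter's automatic globalEval of the response text; dataFilter
+ // performs the eval instead, so failed responses are never executed.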
+ converters: { + "text script": function() {} + }, + dataFilter: function( response ) { + jQuery.globalEval( response, options ); + } + } ); +}; + + +jQuery.fn.extend( { + wrapAll: function( html ) { + var wrap; + + if ( this[ 0 ] ) { + if ( isFunction( html ) ) { + html = html.call( this[ 0 ] ); + } + + // The elements to wrap the target around + wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); + + if ( this[ 0 ].parentNode ) { + wrap.insertBefore( this[ 0 ] ); + } + + wrap.map( function() { + var elem = this; + + while ( elem.firstElementChild ) { + elem = elem.firstElementChild; + } + + return elem; + } ).append( this ); + } + + return this; + }, + + wrapInner: function( html ) { + if ( isFunction( html ) ) { + return this.each( function( i ) { + jQuery( this ).wrapInner( html.call( this, i ) ); + } ); + } + + return this.each( function() { + var self = jQuery( this ), + contents = self.contents(); + + if ( contents.length ) { + contents.wrapAll( html ); + + } else { + self.append( html ); + } + } ); + }, + + wrap: function( html ) { + var htmlIsFunction = isFunction( html ); + + return this.each( function( i ) { + jQuery( this ).wrapAll( htmlIsFunction ? html.call( this, i ) : html ); + } ); + }, + + unwrap: function( selector ) { + this.parent( selector ).not( "body" ).each( function() { + jQuery( this ).replaceWith( this.childNodes ); + } ); + return this; + } +} ); + + +jQuery.expr.pseudos.hidden = function( elem ) { + return !jQuery.expr.pseudos.visible( elem ); +}; +jQuery.expr.pseudos.visible = function( elem ) { + return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); +}; + + + + +jQuery.ajaxSettings.xhr = function() { + try { + return new window.XMLHttpRequest(); + } catch ( e ) {} +}; + +var xhrSuccessStatus = { + + // File protocol always yields status code 0, assume 200 + 0: 200, + + // Support: IE <=9 only + // #1450: sometimes IE returns 1223 when it should be 204 + 1223: 204 + }, + xhrSupported = jQuery.ajaxSettings.xhr(); + +support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); +support.ajax = xhrSupported = !!xhrSupported; + +jQuery.ajaxTransport( function( options ) { + var callback, errorCallback; + + // Cross domain only allowed if supported through XMLHttpRequest + if ( support.cors || xhrSupported && !options.crossDomain ) { + return { + send: function( headers, complete ) { + var i, + xhr = options.xhr(); + + xhr.open( + options.type, + options.url, + options.async, + options.username, + options.password + ); + + // Apply custom fields if provided + if ( options.xhrFields ) { + for ( i in options.xhrFields ) { + xhr[ i ] = options.xhrFields[ i ]; + } + } + + // Override mime type if needed + if ( options.mimeType && xhr.overrideMimeType ) { + xhr.overrideMimeType( options.mimeType ); + } + + // X-Requested-With header + // For cross-domain requests, seeing as conditions for a preflight are + // akin to a jigsaw puzzle, we simply never set it to be sure. + // (it can always be set on a per-request basis or even using ajaxSetup) + // For same-domain requests, won't change header if already provided. 
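+ // For example, a caller passing headers: { "X-Requested-With": "MyApp" }
+ // keeps that value; only a missing header receives the default below.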
+ if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { + headers[ "X-Requested-With" ] = "XMLHttpRequest"; + } + + // Set headers + for ( i in headers ) { + xhr.setRequestHeader( i, headers[ i ] ); + } + + // Callback + callback = function( type ) { + return function() { + if ( callback ) { + callback = errorCallback = xhr.onload = + xhr.onerror = xhr.onabort = xhr.ontimeout = + xhr.onreadystatechange = null; + + if ( type === "abort" ) { + xhr.abort(); + } else if ( type === "error" ) { + + // Support: IE <=9 only + // On a manual native abort, IE9 throws + // errors on any property access that is not readyState + if ( typeof xhr.status !== "number" ) { + complete( 0, "error" ); + } else { + complete( + + // File: protocol always yields status 0; see #8605, #14207 + xhr.status, + xhr.statusText + ); + } + } else { + complete( + xhrSuccessStatus[ xhr.status ] || xhr.status, + xhr.statusText, + + // Support: IE <=9 only + // IE9 has no XHR2 but throws on binary (trac-11426) + // For XHR2 non-text, let the caller handle it (gh-2498) + ( xhr.responseType || "text" ) !== "text" || + typeof xhr.responseText !== "string" ? + { binary: xhr.response } : + { text: xhr.responseText }, + xhr.getAllResponseHeaders() + ); + } + } + }; + }; + + // Listen to events + xhr.onload = callback(); + errorCallback = xhr.onerror = xhr.ontimeout = callback( "error" ); + + // Support: IE 9 only + // Use onreadystatechange to replace onabort + // to handle uncaught aborts + if ( xhr.onabort !== undefined ) { + xhr.onabort = errorCallback; + } else { + xhr.onreadystatechange = function() { + + // Check readyState before timeout as it changes + if ( xhr.readyState === 4 ) { + + // Allow onerror to be called first, + // but that will not handle a native abort + // Also, save errorCallback to a variable + // as xhr.onerror cannot be accessed + window.setTimeout( function() { + if ( callback ) { + errorCallback(); + } + } ); + } + }; + } + + // Create the abort callback + callback = callback( "abort" ); + + try { + + // Do send the request (this may raise an exception) + xhr.send( options.hasContent && options.data || null ); + } catch ( e ) { + + // #14683: Only rethrow if this hasn't been notified as an error yet + if ( callback ) { + throw e; + } + } + }, + + abort: function() { + if ( callback ) { + callback(); + } + } + }; + } +} ); + + + + +// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) +jQuery.ajaxPrefilter( function( s ) { + if ( s.crossDomain ) { + s.contents.script = false; + } +} ); + +// Install script dataType +jQuery.ajaxSetup( { + accepts: { + script: "text/javascript, application/javascript, " + + "application/ecmascript, application/x-ecmascript" + }, + contents: { + script: /\b(?:java|ecma)script\b/ + }, + converters: { + "text script": function( text ) { + jQuery.globalEval( text ); + return text; + } + } +} ); + +// Handle cache's special case and crossDomain +jQuery.ajaxPrefilter( "script", function( s ) { + if ( s.cache === undefined ) { + s.cache = false; + } + if ( s.crossDomain ) { + s.type = "GET"; + } +} ); + +// Bind script tag hack transport +jQuery.ajaxTransport( "script", function( s ) { + + // This transport only deals with cross domain or forced-by-attrs requests + if ( s.crossDomain || s.scriptAttrs ) { + var script, callback; + return { + send: function( _, complete ) { + script = jQuery( " + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

The full Pygments API¶

+

This page describes the Pygments API.

+
+

High-level API¶

+

Functions from the pygments module:

+
+
+pygments.lex(code, lexer)¶
+

Lex code with the lexer (must be a Lexer instance) +and return an iterable of tokens. Currently, this only calls +lexer.get_tokens().

+
+ +
+
+pygments.format(tokens, formatter, outfile=None)¶
+

Format a token stream (iterable of tokens) tokens with the +formatter (must be a Formatter instance). The result is +written to outfile, or if that is None, returned as a +string.

+
+ +
+
+pygments.highlight(code, lexer, formatter, outfile=None)¶
+

This is the most high-level highlighting function. +It combines lex and format in one function.

+
+ +
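A minimal sketch tying the three functions above together; the choice of lexer and formatter here is arbitrary:

from pygments import format, highlight, lex
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

code = 'print("Hello, world!")\n'

# highlight() is shorthand for lex() followed by format()
html = highlight(code, PythonLexer(), HtmlFormatter())
assert html == format(lex(code, PythonLexer()), HtmlFormatter())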

Functions from pygments.lexers:

+
+
+pygments.lexers.get_lexer_by_name(alias, **options)¶
+

Return an instance of a Lexer subclass that has alias in its +aliases list. The lexer is given the options at its +instantiation.

+

Will raise pygments.util.ClassNotFound if no lexer with that alias is +found.

+
+ +
+
+pygments.lexers.get_lexer_for_filename(fn, **options)¶
+

Return a Lexer subclass instance that has a filename pattern +matching fn. The lexer is given the options at its +instantiation.

+

Will raise pygments.util.ClassNotFound if no lexer for that filename +is found.

+
+ +
+
+pygments.lexers.get_lexer_for_mimetype(mime, **options)¶
+

Return a Lexer subclass instance that has mime in its mimetype +list. The lexer is given the options at its instantiation.

+

Will raise pygments.util.ClassNotFound if no lexer for that mimetype +is found.

+
+ +
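A short sketch of the three lookup functions above, including the error they raise; the alias, filename and mimetype are arbitrary examples:

from pygments.lexers import (get_lexer_by_name, get_lexer_for_filename,
                             get_lexer_for_mimetype)
from pygments.util import ClassNotFound

lexer = get_lexer_by_name('python', stripall=True)  # option passed to the lexer
lexer = get_lexer_for_filename('setup.py')          # matched via *.py
try:
    lexer = get_lexer_for_mimetype('text/x-python')
except ClassNotFound:
    pass  # e.g. fall back to a plain-text lexer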
+
+pygments.lexers.load_lexer_from_file(filename, lexername="CustomLexer", **options)¶
+

Return a Lexer subclass instance loaded from the provided file, relative +to the current directory. The file is expected to contain a Lexer class +named lexername (by default, CustomLexer). Users should be very careful with +the input, because this method is equivalent to running eval on the input file. +The lexer is given the options at its instantiation.

+

ClassNotFound is raised if there are any errors loading the Lexer.

+
+

New in version 2.2.

+
+
+ +
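A sketch of loading such a lexer; my_lexer.py and its CustomLexer class are hypothetical, and as noted above the file is executed, so only trusted input should be used:

from pygments.lexers import load_lexer_from_file

# my_lexer.py is assumed to define a class named CustomLexer
lexer = load_lexer_from_file('my_lexer.py', lexername='CustomLexer')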
+
+pygments.lexers.guess_lexer(text, **options)¶
+

Return a Lexer subclass instance that’s guessed from the text in +text. For that, the analyse_text() method of every known lexer +class is called with the text as argument, and the lexer which returned the +highest value will be instantiated and returned.

+

pygments.util.ClassNotFound is raised if no lexer thinks it can +handle the content.

+
+ +
+
+pygments.lexers.guess_lexer_for_filename(filename, text, **options)¶
+

As guess_lexer(), but only lexers which have a pattern in filenames +or alias_filenames that matches filename are taken into consideration.

+

pygments.util.ClassNotFound is raised if no lexer thinks it can +handle the content.

+
+ +
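For example; the shebang below makes the Python lexer the likely winner, though the exact result depends on the analyse_text() scores of all registered lexers:

from pygments.lexers import guess_lexer, guess_lexer_for_filename

lexer = guess_lexer('#!/usr/bin/env python\nprint(42)\n')
print(lexer.name)  # most likely "Python"

# Only lexers whose patterns match index.html take part in the guess
lexer = guess_lexer_for_filename('index.html', '<h1>Hello</h1>')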
+
+pygments.lexers.get_all_lexers()¶
+

Return an iterable over all registered lexers, yielding tuples in the +format:

+
(longname, tuple of aliases, tuple of filename patterns, tuple of mimetypes)
+
+
+
+

New in version 0.6.

+
+
+ +
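For example, the registry can be scanned like this:

from pygments.lexers import get_all_lexers

for longname, aliases, patterns, mimetypes in get_all_lexers():
    if 'text/x-python' in mimetypes:
        print(longname, aliases, patterns)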
+
+pygments.lexers.find_lexer_class_by_name(alias)¶
+

Return the Lexer subclass that has alias in its aliases list, without +instantiating it.

+

Will raise pygments.util.ClassNotFound if no lexer with that alias is +found.

+
+

New in version 2.2.

+
+
+ +
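Unlike get_lexer_by_name(), this returns the class itself, which is useful when instantiation should be deferred; for example:

from pygments.lexers import find_lexer_class_by_name

cls = find_lexer_class_by_name('python')  # the class, not an instance
lexer = cls(stripnl=False)                # instantiate later, with options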
+
+pygments.lexers.find_lexer_class(name)¶
+

Return the Lexer subclass whose name attribute equals the given +name argument.

+
+ +

Functions from pygments.formatters:

+
+
+pygments.formatters.get_formatter_by_name(alias, **options)¶
+

Return an instance of a Formatter subclass that has alias in its +aliases list. The formatter is given the options at its instantiation.

+

Will raise pygments.util.ClassNotFound if no formatter with that +alias is found.

+
+ +
+
+pygments.formatters.get_formatter_for_filename(fn, **options)¶
+

Return a Formatter subclass instance that has a filename pattern +matching fn. The formatter is given the options at its instantiation.

+

Will raise pygments.util.ClassNotFound if no formatter for that filename +is found.

+
+ +
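A short sketch of formatter lookup; linenos is an option understood by the HTML formatter, and out.tex selects the LaTeX formatter via its *.tex pattern:

from pygments.formatters import (get_formatter_by_name,
                                 get_formatter_for_filename)

formatter = get_formatter_by_name('html', linenos=True)
formatter = get_formatter_for_filename('out.tex')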
+
+pygments.formatters.load_formatter_from_file(filename, formattername="CustomFormatter", **options)¶
+

Return a Formatter subclass instance loaded from the provided file, relative +to the current directory. The file is expected to contain a Formatter class +named formattername (by default, CustomFormatter). Users should be very +careful with the input, because this method is equivalent to running eval +on the input file. The formatter is given the options at its instantiation.

+

ClassNotFound is raised if there are any errors loading the Formatter.

+
+

New in version 2.2.

+
+
+ +

Functions from pygments.styles:

+
+
+pygments.styles.get_style_by_name(name)¶
+

Return a style class by its short name. The names of the builtin styles +are listed in pygments.styles.STYLE_MAP.

+

Will raise pygments.util.ClassNotFound if no style of that name is +found.

+
+ +
+
+pygments.styles.get_all_styles()¶
+

Return an iterable over all registered styles, yielding their names.

+
+

New in version 0.6.

+
+
+ +
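For example:

from pygments.styles import get_all_styles, get_style_by_name

style = get_style_by_name('default')  # note: a style class, not an instance
print(style.background_color)
print(sorted(get_all_styles()))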
+
+

Lexers¶

+

The base lexer class from which all lexers are derived is:

+
+
+class pygments.lexer.Lexer(**options)¶
+

The constructor takes a **keywords dictionary of options. +Every subclass must first process its own options and then call +the Lexer constructor, since it processes the stripnl, +stripall and tabsize options.

+

An example looks like this:

+
def __init__(self, **options):
+    self.compress = options.get('compress', '')
+    Lexer.__init__(self, **options)
+
+
+

As these options must all be specifiable as strings (due to the +command line usage), there are various utility functions +available to help with that; see Option processing.

+
+
+get_tokens(text)¶
+

This method is the basic interface of a lexer. It is called by +the highlight() function. It must process the text and return an +iterable of (tokentype, value) pairs from text.

+

Normally, you don’t need to override this method. The default +implementation processes the stripnl, stripall and tabsize +options and then yields all tokens from get_tokens_unprocessed(), +with the index dropped.

+
+ +
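For instance:

from pygments.lexers import PythonLexer

for tokentype, value in PythonLexer().get_tokens('x = 1\n'):
    print(tokentype, repr(value))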
+
+get_tokens_unprocessed(text)¶
+

This method should process the text and return an iterable of +(index, tokentype, value) tuples where index is the starting +position of the token within the input text.

+

This method must be overridden by subclasses.

+
+ +
+
+static analyse_text(text)¶
+

A static method which is called for lexer guessing. It should analyse +the text and return a float in the range from 0.0 to 1.0. +If it returns 0.0, the lexer will not be selected as the most +probable one; if it returns 1.0, it will be selected immediately.

+
+

Note

+

You don’t have to add @staticmethod to the definition of +this method; this will be taken care of by the Lexer’s metaclass.

+
+
+ +

For a list of known tokens have a look at the Builtin Tokens page.

+

A lexer can also have the following attributes (in fact, all of them are mandatory +except alias_filenames) that are used by the builtin lookup mechanism.

+
+
+name¶
+

Full name for the lexer, in human-readable form.

+
+ +
+
+aliases¶
+

A list of short, unique identifiers that can be used to look up +the lexer from a list, e.g. using get_lexer_by_name().

+
+ +
+
+filenames¶
+

A list of fnmatch patterns that match filenames which contain +content for this lexer. The patterns in this list should be unique among +all lexers.

+
+ +
+
+alias_filenames¶
+

A list of fnmatch patterns that match filenames which may or may not +contain content for this lexer. This list is used by the +guess_lexer_for_filename() function, to determine which lexers +are then included in guessing the correct one. That means that +e.g. every lexer for HTML and a template language should include +\*.html in this list.

+
+ +
+
+mimetypes¶
+

A list of MIME types for content that can be lexed with this +lexer.

+
+ +
+ +
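Putting the pieces together, a deliberately minimal lexer sketch; the name, alias and filename pattern are invented for the example:

from pygments.lexer import Lexer
from pygments.token import Text

class TrivialLexer(Lexer):
    name = 'Trivial'           # human-readable name (invented)
    aliases = ['trivial']
    filenames = ['*.trivial']  # made-up pattern
    mimetypes = []

    def get_tokens_unprocessed(self, text):
        # a single (index, tokentype, value) tuple covering the whole input
        yield 0, Text, text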
+
+

Formatters¶

+

A formatter is derived from this class:

+
+
+class pygments.formatter.Formatter(**options)¶
+

As with lexers, this constructor processes options and then must call the +base class __init__().

+

The Formatter class recognizes the options style, full and +title. It is up to the formatter class whether it uses them.

+
+
+get_style_defs(arg='')¶
+

This method must return statements or declarations suitable to define +the current style for subsequent highlighted text (e.g. CSS classes +in the HTMLFormatter).

+

The optional argument arg can be used to modify the generation and +is formatter dependent (it is standardized because it can be given on +the command line).

+

This method is called by the -S command-line option, +the arg is then given by the -a option.

+
+ +
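For example, with the HTML formatter the argument becomes the CSS selector prefix:

from pygments.formatters import HtmlFormatter

print(HtmlFormatter(style='default').get_style_defs('.highlight'))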
+
+format(tokensource, outfile)¶
+

This method must format the tokens from the tokensource iterable and +write the formatted version to the file object outfile.

+

Formatter options can control how exactly the tokens are converted.

+
+ +
+

New in version 0.7: A formatter must have the following attributes that are used by the +builtin lookup mechanism.

+
+
+
+name¶
+

Full name for the formatter, in human-readable form.

+
+ +
+
+aliases¶
+

A list of short, unique identifiers that can be used to look up +the formatter from a list, e.g. using get_formatter_by_name().

+
+ +
+
+filenames¶
+

A list of fnmatch patterns that match filenames for which this +formatter can produce output. The patterns in this list should be unique +among all formatters.

+
+ +
+ +
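A deliberately minimal formatter sketch with the mandatory attributes; the name, alias and output format are invented for the example:

from pygments.formatter import Formatter

class TokenListFormatter(Formatter):
    name = 'Token list'
    aliases = ['tokenlist']
    filenames = []

    def format(self, tokensource, outfile):
        # one "tokentype <tab> repr(value)" line per token
        for ttype, value in tokensource:
            outfile.write('%s\t%r\n' % (ttype, value))

Such a class can then be passed to format() or highlight() like any builtin formatter.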
+
+

Option processing¶

+

The pygments.util module has some utility functions usable for option +processing:

+
+
+exception pygments.util.OptionError¶
+

This exception will be raised by all option processing functions if +the type or value of the argument is not correct.

+
+ +
+
+pygments.util.get_bool_opt(options, optname, default=None)¶
+

Interpret the key optname from the dictionary options as a boolean and +return it. Return default if optname is not in options.

+

The valid string values for True are 1, yes, true and +on, the ones for False are 0, no, false and off +(matched case-insensitively).

+
+ +
+
+pygments.util.get_int_opt(options, optname, default=None)¶
+

As get_bool_opt(), but interpret the value as an integer.

+
+ +
+
+pygments.util.get_list_opt(options, optname, default=None)¶
+

If the key optname from the dictionary options is a string, +split it at whitespace and return it. If it is already a list +or a tuple, it is returned as a list.

+
+ +
+
+pygments.util.get_choice_opt(options, optname, allowed, default=None)¶
+

If the key optname from the dictionary is not in the sequence +allowed, raise an error; otherwise return it.

+
+

New in version 0.8.

+
+
+ +
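A sketch of these helpers in a lexer constructor, in the style of the earlier Lexer example; the option names strict, maxdepth and extrakeywords are invented:

from pygments.lexer import Lexer
from pygments.util import get_bool_opt, get_int_opt, get_list_opt

class ConfigurableLexer(Lexer):
    def __init__(self, **options):
        self.strict = get_bool_opt(options, 'strict', False)
        self.max_depth = get_int_opt(options, 'maxdepth', 10)
        self.extra_keywords = get_list_opt(options, 'extrakeywords', [])
        Lexer.__init__(self, **options)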
+
+ + +
+
+
+
+
+ +
+ + + \ No newline at end of file diff --git a/doc/_build/html/docs/authors.html b/doc/_build/html/docs/authors.html new file mode 100644 index 0000000..0058940 --- /dev/null +++ b/doc/_build/html/docs/authors.html @@ -0,0 +1,349 @@ + + + + + + + Full contributor list — Pygments + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

Full contributor list¶

+

Pygments is written and maintained by Georg Brandl <georg@python.org>.

+

Major developers are Tim Hatch <tim@timhatch.com> and Armin Ronacher +<armin.ronacher@active-4.com>.

+

Other contributors, listed alphabetically, are:

+
    +
  • Sam Aaron – Ioke lexer

  • +
  • Ali Afshar – image formatter

  • +
  • Thomas Aglassinger – Easytrieve, JCL, Rexx, Transact-SQL and VBScript +lexers

  • +
  • Muthiah Annamalai – Ezhil lexer

  • +
  • Kumar Appaiah – Debian control lexer

  • +
  • Andreas Amann – AppleScript lexer

  • +
  • Timothy Armstrong – Dart lexer fixes

  • +
  • Jeffrey Arnold – R/S, Rd, BUGS, Jags, and Stan lexers

  • +
  • Jeremy Ashkenas – CoffeeScript lexer

  • +
  • José Joaquín Atria – Praat lexer

  • +
  • Stefan Matthias Aust – Smalltalk lexer

  • +
  • Lucas Bajolet – Nit lexer

  • +
  • Ben Bangert – Mako lexers

  • +
  • Max Battcher – Darcs patch lexer

  • +
  • Thomas Baruchel – APL lexer

  • +
  • Tim Baumann – (Literate) Agda lexer

  • +
  • Paul Baumgart, 280 North, Inc. – Objective-J lexer

  • +
  • Michael Bayer – Myghty lexers

  • +
  • Thomas Beale – Archetype lexers

  • +
  • John Benediktsson – Factor lexer

  • +
  • Trevor Bergeron – mIRC formatter

  • +
  • Vincent Bernat – LessCSS lexer

  • +
  • Christopher Bertels – Fancy lexer

  • +
  • Sébastien Bigaret – QVT Operational lexer

  • +
  • Jarrett Billingsley – MiniD lexer

  • +
  • Adam Blinkinsop – Haskell, Redcode lexers

  • +
  • Stéphane Blondon – SGF lexer

  • +
  • Frits van Bommel – assembler lexers

  • +
  • Pierre Bourdon – bugfixes

  • +
  • Matthias Bussonnier – ANSI style handling for terminal-256 formatter

  • +
  • chebee7i – Python traceback lexer improvements

  • +
  • Hiram Chirino – Scaml and Jade lexers

  • +
  • Mauricio Caceres – SAS and Stata lexers

  • +
  • Ian Cooper – VGL lexer

  • +
  • David Corbett – Inform, Jasmin, JSGF, Snowball, and TADS 3 lexers

  • +
  • Leaf Corcoran – MoonScript lexer

  • +
  • Christopher Creutzig – MuPAD lexer

  • +
  • Daniël W. Crompton – Pike lexer

  • +
  • Pete Curry – bugfixes

  • +
  • Bryan Davis – EBNF lexer

  • +
  • Bruno Deferrari – Shen lexer

  • +
  • Giedrius Dubinskas – HTML formatter improvements

  • +
  • Owen Durni – Haxe lexer

  • +
  • Alexander Dutton, Oxford University Computing Services – SPARQL lexer

  • +
  • James Edwards – Terraform lexer

  • +
  • Nick Efford – Python 3 lexer

  • +
  • Sven Efftinge – Xtend lexer

  • +
  • Artem Egorkine – terminal256 formatter

  • +
  • Matthew Fernandez – CAmkES lexer

  • +
  • Michael Ficarra – CPSA lexer

  • +
  • James H. Fisher – PostScript lexer

  • +
  • William S. Fulton – SWIG lexer

  • +
  • Carlos Galdino – Elixir and Elixir Console lexers

  • +
  • Michael Galloy – IDL lexer

  • +
  • Naveen Garg – Autohotkey lexer

  • +
  • Laurent Gautier – R/S lexer

  • +
  • Alex Gaynor – PyPy log lexer

  • +
  • Richard Gerkin – Igor Pro lexer

  • +
  • Alain Gilbert – TypeScript lexer

  • +
  • Alex Gilding – BlitzBasic lexer

  • +
  • Bertrand Goetzmann – Groovy lexer

  • +
  • Krzysiek Goj – Scala lexer

  • +
  • Andrey Golovizin – BibTeX lexers

  • +
  • Matt Good – Genshi, Cheetah lexers

  • +
  • Michał Górny – vim modeline support

  • +
  • Alex Gosse – TrafficScript lexer

  • +
  • Patrick Gotthardt – PHP namespaces support

  • +
  • Olivier Guibe – Asymptote lexer

  • +
  • Phil Hagelberg – Fennel lexer

  • +
  • Florian Hahn – Boogie lexer

  • +
  • Martin Harriman – SNOBOL lexer

  • +
  • Matthew Harrison – SVG formatter

  • +
  • Steven Hazel – Tcl lexer

  • +
  • Dan Michael Heggø – Turtle lexer

  • +
  • Aslak Hellesøy – Gherkin lexer

  • +
  • Greg Hendershott – Racket lexer

  • +
  • Justin Hendrick – ParaSail lexer

  • +
  • Jordi Gutiérrez Hermoso – Octave lexer

  • +
  • David Hess, Fish Software, Inc. – Objective-J lexer

  • +
  • Varun Hiremath – Debian control lexer

  • +
  • Rob Hoelz – Perl 6 lexer

  • +
  • Doug Hogan – Mscgen lexer

  • +
  • Ben Hollis – Mason lexer

  • +
  • Max Horn – GAP lexer

  • +
  • Alastair Houghton – Lexer inheritance facility

  • +
  • Tim Howard – BlitzMax lexer

  • +
  • Dustin Howett – Logos lexer

  • +
  • Ivan Inozemtsev – Fantom lexer

  • +
  • Hiroaki Itoh – Shell console rewrite, Lexers for PowerShell session, +MSDOS session, BC, WDiff

  • +
  • Brian R. Jackson – Tea lexer

  • +
  • Christian Jann – ShellSession lexer

  • +
  • Dennis Kaarsemaker – sources.list lexer

  • +
  • Dmitri Kabak – Inferno Limbo lexer

  • +
  • Igor Kalnitsky – vhdl lexer

  • +
  • Alexander Kit – MaskJS lexer

  • +
  • Pekka Klärck – Robot Framework lexer

  • +
  • Gerwin Klein – Isabelle lexer

  • +
  • Eric Knibbe – Lasso lexer

  • +
  • Stepan Koltsov – Clay lexer

  • +
  • Adam Koprowski – Opa lexer

  • +
  • Benjamin Kowarsch – Modula-2 lexer

  • +
  • Domen Kožar – Nix lexer

  • +
  • Oleh Krekel – Emacs Lisp lexer

  • +
  • Alexander Kriegisch – Kconfig and AspectJ lexers

  • +
  • Marek Kubica – Scheme lexer

  • +
  • Jochen Kupperschmidt – Markdown processor

  • +
  • Gerd Kurzbach – Modelica lexer

  • +
  • Jon Larimer, Google Inc. – Smali lexer

  • +
  • Olov Lassus – Dart lexer

  • +
  • Matt Layman – TAP lexer

  • +
  • Kristian Lyngstøl – Varnish lexers

  • +
  • Sylvestre Ledru – Scilab lexer

  • +
  • Chee Sing Lee – Flatline lexer

  • +
  • Mark Lee – Vala lexer

  • +
  • Valentin Lorentz – C++ lexer improvements

  • +
  • Ben Mabey – Gherkin lexer

  • +
  • Angus MacArthur – QML lexer

  • +
  • Louis Mandel – X10 lexer

  • +
  • Louis Marchand – Eiffel lexer

  • +
  • Simone Margaritelli – Hybris lexer

  • +
  • Kirk McDonald – D lexer

  • +
  • Gordon McGregor – SystemVerilog lexer

  • +
  • Stephen McKamey – Duel/JBST lexer

  • +
  • Brian McKenna – F# lexer

  • +
  • Charles McLaughlin – Puppet lexer

  • +
  • Kurt McKee – Tera Term macro lexer

  • +
  • Lukas Meuser – BBCode formatter, Lua lexer

  • +
  • Cat Miller – Pig lexer

  • +
  • Paul Miller – LiveScript lexer

  • +
  • Hong Minhee – HTTP lexer

  • +
  • Michael Mior – Awk lexer

  • +
  • Bruce Mitchener – Dylan lexer rewrite

  • +
  • Reuben Morais – SourcePawn lexer

  • +
  • Jon Morton – Rust lexer

  • +
  • Paulo Moura – Logtalk lexer

  • +
  • Mher Movsisyan – DTD lexer

  • +
  • Dejan Muhamedagic – Crmsh lexer

  • +
  • Ana Nelson – Ragel, ANTLR, R console lexers

  • +
  • Kurt Neufeld – Markdown lexer

  • +
  • Nam T. Nguyen – Monokai style

  • +
  • Jesper Noehr – HTML formatter “anchorlinenos”

  • +
  • Mike Nolta – Julia lexer

  • +
  • Jonas Obrist – BBCode lexer

  • +
  • Edward O’Callaghan – Cryptol lexer

  • +
  • David Oliva – Rebol lexer

  • +
  • Pat Pannuto – nesC lexer

  • +
  • Jon Parise – Protocol buffers and Thrift lexers

  • +
  • Benjamin Peterson – Test suite refactoring

  • +
  • Ronny Pfannschmidt – BBCode lexer

  • +
  • Dominik Picheta – Nimrod lexer

  • +
  • Andrew Pinkham – RTF Formatter Refactoring

  • +
  • Clément Prévost – UrbiScript lexer

  • +
  • Tanner Prynn – cmdline -x option and loading lexers from files

  • +
  • Oleh Prypin – Crystal lexer (based on Ruby lexer)

  • +
  • Elias Rabel – Fortran fixed form lexer

  • +
  • raichoo – Idris lexer

  • +
  • Kashif Rasul – CUDA lexer

  • +
  • Nathan Reed – HLSL lexer

  • +
  • Justin Reidy – MXML lexer

  • +
  • Norman Richards – JSON lexer

  • +
  • Corey Richardson – Rust lexer updates

  • +
  • Lubomir Rintel – GoodData MAQL and CL lexers

  • +
  • Andre Roberge – Tango style

  • +
  • Georg Rollinger – HSAIL lexer

  • +
  • Michiel Roos – TypoScript lexer

  • +
  • Konrad Rudolph – LaTeX formatter enhancements

  • +
  • Mario Ruggier – Evoque lexers

  • +
  • Miikka Salminen – Lovelace style, Hexdump lexer, lexer enhancements

  • +
  • Stou Sandalski – NumPy, FORTRAN, tcsh and XSLT lexers

  • +
  • Matteo Sasso – Common Lisp lexer

  • +
  • Joe Schafer – Ada lexer

  • +
  • Ken Schutte – Matlab lexers

  • +
  • René Schwaiger – Rainbow Dash style

  • +
  • Sebastian Schweizer – Whiley lexer

  • +
  • Tassilo Schweyer – Io, MOOCode lexers

  • +
  • Ted Shaw – AutoIt lexer

  • +
  • Joerg Sieker – ABAP lexer

  • +
  • Robert Simmons – Standard ML lexer

  • +
  • Kirill Simonov – YAML lexer

  • +
  • Corbin Simpson – Monte lexer

  • +
  • Alexander Smishlajev – Visual FoxPro lexer

  • +
  • Steve Spigarelli – XQuery lexer

  • +
  • Jerome St-Louis – eC lexer

  • +
  • Camil Staps – Clean and NuSMV lexers; Solarized style

  • +
  • James Strachan – Kotlin lexer

  • +
  • Tom Stuart – Treetop lexer

  • +
  • Colin Sullivan – SuperCollider lexer

  • +
  • Ben Swift – Extempore lexer

  • +
  • Edoardo Tenani – Arduino lexer

  • +
  • Tiberius Teng – default style overhaul

  • +
  • Jeremy Thurgood – Erlang, Squid config lexers

  • +
  • Brian Tiffin – OpenCOBOL lexer

  • +
  • Bob Tolbert – Hy lexer

  • +
  • Matthias Trute – Forth lexer

  • +
  • Erick Tryzelaar – Felix lexer

  • +
  • Alexander Udalov – Kotlin lexer improvements

  • +
  • Thomas Van Doren – Chapel lexer

  • +
  • Daniele Varrazzo – PostgreSQL lexers

  • +
  • Abe Voelker – OpenEdge ABL lexer

  • +
  • Pepijn de Vos – HTML formatter CTags support

  • +
  • Matthias Vallentin – Bro lexer

  • +
  • Benoît Vinot – AMPL lexer

  • +
  • Linh Vu Hong – RSL lexer

  • +
  • Nathan Weizenbaum – Haml and Sass lexers

  • +
  • Nathan Whetsell – Csound lexers

  • +
  • Dietmar Winkler – Modelica lexer

  • +
  • Nils Winter – Smalltalk lexer

  • +
  • Davy Wybiral – Clojure lexer

  • +
  • Whitney Young – ObjectiveC lexer

  • +
  • Diego Zamboni – CFengine3 lexer

  • +
  • Enrique Zamudio – Ceylon lexer

  • +
  • Alex Zimin – Nemerle lexer

  • +
  • Rob Zimmerman – Kal lexer

  • +
  • Vincent Zurczak – Roboconf lexer

  • +
  • Rostyslav Golda – FloScript lexer

  • +
  • GitHub, Inc – DASM16, Augeas, TOML, and Slash lexers

  • +
  • Simon Garnotel – FreeFem++ lexer

  • +
+

Many thanks for all contributions!

+
+ + +
+
+
+
+
+ +
+ + + \ No newline at end of file diff --git a/doc/_build/html/docs/changelog.html b/doc/_build/html/docs/changelog.html new file mode 100644 index 0000000..d75d677 --- /dev/null +++ b/doc/_build/html/docs/changelog.html @@ -0,0 +1,1333 @@ + + + + + + + Pygments changelog — Pygments + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

Pygments changelog¶

+

Since 2.5.0, issue numbers refer to the tracker at +<https://github.com/pygments/pygments/issues>, +pull request numbers to the requests at +<https://github.com/pygments/pygments/pulls>.

+
+

Version 2.5.0¶

+

(released November 26, 2019)

+
    +
  • Added lexers:

    +
      +
    • Email (PR#1246)

    • +
    • Erlang, Elixir shells (PR#823, #1521)

    • +
    • Notmuch (PR#1264)

    • +
    • Scdoc (PR#1268)

    • +
    • Solidity (#1214)

    • +
    • Zeek (new name for Bro) (PR#1269)

    • +
    • Zig (PR#820)

    • +
    +
  • +
  • Updated lexers:

    +
      +
    • Apache2 Configuration (PR#1251)

    • +
    • Bash sessions (#1253)

    • +
    • CSound (PR#1250)

    • +
    • Dart

    • +
    • Dockerfile

    • +
    • Emacs Lisp

    • +
    • Handlebars (PR#773)

    • +
    • Java (#1101, #987)

    • +
    • Logtalk (PR#1261)

    • +
    • Matlab (PR#1271)

    • +
    • Praat (PR#1277)

    • +
    • Python3 (PR#1255)

    • +
    • Ruby

    • +
    • YAML (#1528)

    • +
    • Velocity

    • +
    +
  • +
  • Added styles:

    +
      +
    • Inkpot (PR#1276)

    • +
    +
  • +
  • The PythonLexer class is now an alias for the former Python3Lexer. +The old PythonLexer is available as Python2Lexer. The same change has +been made for the PythonTracebackLexer. The python3 option for +the PythonConsoleLexer is now true by default.

  • +
  • Bump NasmLexer priority over TasmLexer for .asm files +(fixes #1326)

  • +
  • Default font in the ImageFormatter has been updated (#928, PR#1245)

  • +
  • Test suite switched to py.test, removed nose dependency (#1490)

  • +
  • Reduce TeraTerm lexer score – it used to match nearly all languages +(#1256)

  • +
  • Treat Skylark/Starlark files as Python files (PR#1259)

  • +
  • Image formatter: actually respect line_number_separator option

  • +
  • Add LICENSE file to wheel builds

  • +
  • Agda: fix lambda highlighting

  • +
  • Dart: support @ annotations

  • +
  • Dockerfile: accept FROM ... AS syntax

  • +
  • Emacs Lisp: add more string functions

  • +
  • GAS: accept registers in directive arguments

  • +
  • Java: make structural punctuation (braces, parens, colon, comma) Punctuation, not Operator (#987)

  • +
  • Java: support var contextual keyword (#1101)

  • +
  • Matlab: Fix recognition of function keyword (PR#1271)

  • +
  • Python: recognize .jy filenames (#976)

  • +
  • Python: recognize f string prefix (#1156)

  • +
  • Ruby: support squiggly heredocs

  • +
  • Shell sessions: recognize Virtualenv prompt (PR#1266)

  • +
  • Velocity: support silent reference syntax

  • +
+
+
+

Version 2.4.2¶

+

(released May 28, 2019)

+
    +
  • Fix encoding error when guessing lexer with given encoding option +(#1438)

  • +
+
+
+

Version 2.4.1¶

+

(released May 24, 2019)

+
    +
  • Updated lexers:

    +
      +
    • Coq (#1430)

    • +
    • MSDOS Session (PR#734)

    • +
    • NASM (#1517)

    • +
    • Objective-C (PR#813, #1508)

    • +
    • Prolog (#1511)

    • +
    • TypeScript (#1515)

    • +
    +
  • +
  • Support CSS variables in stylesheets (PR#814, #1356)

  • +
  • Fix F# lexer name (PR#709)

  • +
  • Fix TerminalFormatter using bold for bright text (#1480)

  • +
+
+
+

Version 2.4.0¶

+

(released May 8, 2019)

+
    +
  • Added lexers:

    +
      +
    • Augeas (PR#807)

    • +
    • BBC Basic (PR#806)

    • +
    • Boa (PR#756)

    • +
    • Charm++ CI (PR#788)

    • +
    • DASM16 (PR#807)

    • +
    • FloScript (PR#750)

    • +
    • FreeFem++ (PR#785)

    • +
    • Hspec (PR#790)

    • +
    • Pony (PR#627)

    • +
    • SGF (PR#780)

    • +
    • Slash (PR#807)

    • +
    • Slurm (PR#760)

    • +
    • Tera Term Language (PR#749)

    • +
    • TOML (PR#807)

    • +
    • Unicon (PR#731)

    • +
    • VBScript (PR#673)

    • +
    +
  • +
  • Updated lexers:

    +
      +
    • Apache2 (PR#766)

    • +
    • Cypher (PR#746)

    • +
    • LLVM (PR#792)

    • +
    • Makefiles (PR#766)

    • +
    • PHP (#1482)

    • +
    • Rust

    • +
    • SQL (PR#672)

    • +
    • Stan (PR#774)

    • +
    • Stata (PR#800)

    • +
    • Terraform (PR#787)

    • +
    • YAML

    • +
    +
  • +
  • Add solarized style (PR#708)

  • +
  • Add support for Markdown reference-style links (PR#753)

  • +
  • Add license information to generated HTML/CSS files (#1496)

  • +
  • Change ANSI color names (PR#777)

  • +
  • Fix catastrophic backtracking in the bash lexer (#1494)

  • +
  • Fix documentation failing to build using Sphinx 2.0 (#1501)

  • +
  • Fix incorrect links in the Lisp and R lexer documentation (PR#775)

  • +
  • Fix rare unicode errors on Python 2.7 (PR#798, #1492)

  • +
  • Fix lexers popping from an empty stack (#1506)

  • +
  • TypoScript uses .typoscript now (#1498)

  • +
  • Updated Trove classifiers and pip requirements (PR#799)

  • +
+
+
+

Version 2.3.1¶

+

(released Dec 16, 2018)

+
    +
  • Updated lexers:

    +
      +
    • ASM (PR#784)

    • +
    • Chapel (PR#735)

    • +
    • Clean (PR#621)

    • +
    • CSound (PR#684)

    • +
    • Elm (PR#744)

    • +
    • Fortran (PR#747)

    • +
    • GLSL (PR#740)

    • +
    • Haskell (PR#745)

    • +
    • Hy (PR#754)

    • +
    • Igor Pro (PR#764)

    • +
    • PowerShell (PR#705)

    • +
    • Python (PR#720, #1299, PR#715)

    • +
    • SLexer (PR#680)

    • +
    • YAML (PR#762, PR#724)

    • +
    +
  • +
  • Fix invalid string escape sequences

  • +
  • Fix FutureWarning introduced by regex changes in Python 3.7

  • +
+
+
+

Version 2.3.0¶

+

(released Nov 25, 2018)

+
    +
  • Added lexers:

    +
      +
    • Fennel (PR#783)

    • +
    • HLSL (PR#675)

    • +
    +
  • +
  • Updated lexers:

    +
      +
    • Dockerfile (PR#714)

    • +
    +
  • +
  • Minimum Python versions changed to 2.7 and 3.5

  • +
  • Added support for Python 3.7 generator changes (PR#772)

  • +
  • Fix incorrect token type in SCSS for single-quote strings (#1322)

  • +
  • Use terminal256 formatter if TERM contains 256 (PR#666)

  • +
  • Fix incorrect handling of GitHub style fences in Markdown (PR#741, #1389)

  • +
  • Fix %a not being highlighted in Python3 strings (PR#727)

  • +
+
+
+

Version 2.2.0¶

+

(released Jan 22, 2017)

+
    +
  • Added lexers:

    +
      +
    • AMPL

    • +
    • TypoScript (#1173)

    • +
    • Varnish config (PR#554)

    • +
    • Clean (PR#503)

    • +
    • WDiff (PR#513)

    • +
    • Flatline (PR#551)

    • +
    • Silver (PR#537)

    • +
    • HSAIL (PR#518)

    • +
    • JSGF (PR#546)

    • +
    • NCAR command language (PR#536)

    • +
    • Extempore (PR#530)

    • +
    • Cap’n Proto (PR#595)

    • +
    • Whiley (PR#573)

    • +
    • Monte (PR#592)

    • +
    • Crystal (PR#576)

    • +
    • Snowball (PR#589)

    • +
    • CapDL (PR#579)

    • +
    • NuSMV (PR#564)

    • +
    • SAS, Stata (PR#593)

    • +
    +
  • +
  • Added the ability to load lexer and formatter classes directly from files +with the -x command line option and the lexers.load_lexer_from_file() +and formatters.load_formatter_from_file() functions. (PR#559)

  • +
  • Added lexers.find_lexer_class_by_name(). (#1203)

  • +
  • Added new token types and lexing for magic methods and variables in Python +and PHP.

  • +
  • Added a new token type for string affixes and lexing for them in Python, C++ +and Postgresql lexers.

  • +
  • Added a new token type for heredoc (and similar) string delimiters and +lexing for them in C++, Perl, PHP, Postgresql and Ruby lexers.

  • +
  • Styles can now define colors with ANSI colors for use in the 256-color +terminal formatter. (PR#531)

  • +
  • Improved the CSS lexer. (#1083, #1130)

  • +
  • Added “Rainbow Dash” style. (PR#623)

  • +
  • Delay loading pkg_resources, which takes a long while to import. (PR#690)

  • +
+
+
+

Version 2.1.3¶

+

(released Mar 2, 2016)

+
    +
  • Fixed regression in Bash lexer (PR#563)

  • +
+
+
+

Version 2.1.2¶

+

(released Feb 29, 2016)

+
    +
  • Fixed Python 3 regression in image formatter (#1215)

  • +
  • Fixed regression in Bash lexer (PR#562)

  • +
+
+
+

Version 2.1.1¶

+

(released Feb 14, 2016)

+
    +
  • Fixed Jython compatibility (#1205)

  • +
  • Fixed HTML formatter output with leading empty lines (#1111)

  • +
  • Added a mapping table for LaTeX encodings and added utf8 (#1152)

  • +
  • Fixed image formatter font searching on Macs (#1188)

  • +
  • Fixed deepcopy-ing of Token instances (#1168)

  • +
  • Fixed Julia string interpolation (#1170)

  • +
  • Fixed statefulness of HttpLexer between get_tokens calls

  • +
  • Many smaller fixes to various lexers

  • +
+
+
+

Version 2.1¶

+

(released Jan 17, 2016)

+
    +
  • Added lexers:

    +
      +
    • Emacs Lisp (PR#431)

    • +
    • Arduino (PR#442)

    • +
    • Modula-2 with multi-dialect support (#1090)

    • +
    • Fortran fixed format (PR#213)

    • +
    • Archetype Definition language (PR#483)

    • +
    • Terraform (PR#432)

    • +
    • Jcl, Easytrieve (PR#208)

    • +
    • ParaSail (PR#381)

    • +
    • Boogie (PR#420)

    • +
    • Turtle (PR#425)

    • +
    • Fish Shell (PR#422)

    • +
    • Roboconf (PR#449)

    • +
    • Test Anything Protocol (PR#428)

    • +
    • Shen (PR#385)

    • +
    • Component Pascal (PR#437)

    • +
    • SuperCollider (PR#472)

    • +
    • Shell consoles (Tcsh, PowerShell, MSDOS) (PR#479)

    • +
    • Elm and J (PR#452)

    • +
    • Crmsh (PR#440)

    • +
    • Praat (PR#492)

    • +
    • CSound (PR#494)

    • +
    • Ezhil (PR#443)

    • +
    • Thrift (PR#469)

    • +
    • QVT Operational (PR#204)

    • +
    • Hexdump (PR#508)

    • +
    • CAmkES Configuration (PR#462)

    • +
    +
  • +
  • Added styles:

    +
      +
    • Lovelace (PR#456)

    • +
    • Algol and Algol-nu (#1090)

    • +
    +
  • +
  • Added formatters:

    +
      +
    • IRC (PR#458)

    • +
    • True color (24-bit) terminal ANSI sequences (#1142) +(formatter alias: “16m”)

    • +
    +
  • +
  • New “filename” option for HTML formatter (PR#527).

  • +
  • Improved performance of the HTML formatter for long lines (PR#504).

  • +
  • Updated autopygmentize script (PR#445).

  • +
  • Fixed style inheritance for non-standard token types in HTML output.

  • +
  • Added support for async/await to Python 3 lexer.

  • +
  • Rewrote linenos option for TerminalFormatter (it’s better, but slightly +different output than before) (#1147).

  • +
  • Javascript lexer now supports most of ES6 (#1100).

  • +
  • Cocoa builtins updated for iOS 8.1 (PR#433).

  • +
  • Combined BashSessionLexer and ShellSessionLexer; the new version should support +the prompt styles of either.

  • +
  • Added option to pygmentize to show a full traceback on exceptions.

  • +
  • Fixed incomplete output on Windows and Python 3 (e.g. when using iPython +Notebook) (#1153).

  • +
  • Allowed more traceback styles in Python console lexer (PR#253).

  • +
  • Added decorators to TypeScript (PR#509).

  • +
  • Fix highlighting of certain IRC logs formats (#1076).

  • +
+
+
+

Version 2.0.2¶

+

(released Jan 20, 2015)

+
    +
  • Fix Python tracebacks getting duplicated in the console lexer (#1068).

  • +
  • Backquote-delimited identifiers are now recognized in F# (#1062).

  • +
+
+
+

Version 2.0.1¶

+

(released Nov 10, 2014)

+
    +
  • Fix an encoding issue when using pygmentize with the -o option.

  • +
+
+
+

Version 2.0¶

+

(released Nov 9, 2014)

+
    +
  • Default lexer encoding is now “guess”, i.e. UTF-8 / Locale / Latin1 is +tried in that order.

  • +
  • Major update to Swift lexer (PR#410).

  • +
  • Multiple fixes to lexer guessing in conflicting cases:

    +
      +
    • recognize HTML5 by doctype

    • +
    • recognize XML by XML declaration

    • +
    • don’t recognize C/C++ as SystemVerilog

    • +
    +
  • +
  • Simplified regexes and builtin lists.

  • +
+
+
+

Version 2.0rc1¶

+

(released Oct 16, 2014)

+
    +
  • Dropped Python 2.4 and 2.5 compatibility. This is in favor of single-source +compatibility between Python 2.6, 2.7 and 3.3+.

  • +
  • New website and documentation based on Sphinx (finally!)

  • +
  • Lexers added:

    +
      +
    • APL (#969)

    • +
    • Agda and Literate Agda (PR#203)

    • +
    • Alloy (PR#355)

    • +
    • AmbientTalk

    • +
    • BlitzBasic (PR#197)

    • +
    • ChaiScript (PR#24)

    • +
    • Chapel (PR#256)

    • +
    • Cirru (PR#275)

    • +
    • Clay (PR#184)

    • +
    • ColdFusion CFC (PR#283)

    • +
    • Cryptol and Literate Cryptol (PR#344)

    • +
    • Cypher (PR#257)

    • +
    • Docker config files

    • +
    • EBNF (PR#193)

    • +
    • Eiffel (PR#273)

    • +
    • GAP (PR#311)

    • +
    • Golo (PR#309)

    • +
    • Handlebars (PR#186)

    • +
    • Hy (PR#238)

    • +
    • Idris and Literate Idris (PR#210)

    • +
    • Igor Pro (PR#172)

    • +
    • Inform 6/7 (PR#281)

    • +
    • Intel objdump (PR#279)

    • +
    • Isabelle (PR#386)

    • +
    • Jasmin (PR#349)

    • +
    • JSON-LD (PR#289)

    • +
    • Kal (PR#233)

    • +
    • Lean (PR#399)

    • +
    • LSL (PR#296)

    • +
    • Limbo (PR#291)

    • +
    • Liquid (#977)

    • +
    • MQL (PR#285)

    • +
    • MaskJS (PR#280)

    • +
    • Mozilla preprocessors

    • +
    • Mathematica (PR#245)

    • +
    • NesC (PR#166)

    • +
    • Nit (PR#375)

    • +
    • Nix (PR#267)

    • +
    • Pan

    • +
    • Pawn (PR#211)

    • +
    • Perl 6 (PR#181)

    • +
    • Pig (PR#304)

    • +
    • Pike (PR#237)

    • +
    • QBasic (PR#182)

    • +
    • Red (PR#341)

    • +
    • ResourceBundle (#1038)

    • +
    • Rexx (PR#199)

    • +
    • Rql (PR#251)

    • +
    • Rsl

    • +
    • SPARQL (PR#78)

    • +
    • Slim (PR#366)

    • +
    • Swift (PR#371)

    • +
    • Swig (PR#168)

    • +
    • TADS 3 (PR#407)

    • +
    • Todo.txt todo lists

    • +
    • Twig (PR#404)

    • +
    +
  • +
  • Added a helper to “optimize” regular expressions that match one of many +literal words; this can save 20% and more lexing time with lexers that +highlight many keywords or builtins.

  • +
  • New styles: “xcode” and “igor”, similar to the default highlighting of +the respective IDEs.

  • +
  • The command-line “pygmentize” tool now tries a little harder to find the +correct encoding for files and the terminal (#979).

  • +
  • Added “inencoding” option for lexers to override “encoding” analogous +to “outencoding” (#800).

  • +
  • Added line-by-line “streaming” mode for pygmentize with the “-s” option. +(PR#165) Only fully works for lexers that have no constructs spanning +lines!

  • +
  • Added an “envname” option to the LaTeX formatter to select a replacement +verbatim environment (PR#235).

  • +
  • Updated the Makefile lexer to yield a little more useful highlighting.

  • +
  • Lexer aliases passed to get_lexer_by_name() are now case-insensitive.

  • +
  • File name matching in lexers and formatters will now use a regex cache +for speed (PR#205).

  • +
  • Pygments will now recognize “vim” modelines when guessing the lexer for +a file based on content (PR#118).

  • +
  • Major restructure of the pygments.lexers module namespace. There are now +many more modules with less lexers per module. Old modules are still around +and re-export the lexers they previously contained.

  • +
  • The NameHighlightFilter now works with any Name.* token type (#790).

  • +
  • Python 3 lexer: add new exceptions from PEP 3151.

  • +
  • Opa lexer: add new keywords (PR#170).

  • +
  • Julia lexer: add keywords and underscore-separated number +literals (PR#176).

  • +
  • Lasso lexer: fix method highlighting, update builtins. Fix +guessing so that plain XML isn’t always taken as Lasso (PR#163).

  • +
  • Objective C/C++ lexers: allow “@” prefixing any expression (#871).

  • +
  • Ruby lexer: fix lexing of Name::Space tokens (#860) and of symbols +in hashes (#873).

  • +
  • Stan lexer: update for version 2.4.0 of the language (PR#162, PR#255, PR#377).

  • +
  • JavaScript lexer: add the “yield” keyword (PR#196).

  • +
  • HTTP lexer: support for PATCH method (PR#190).

  • +
  • Koka lexer: update to newest language spec (PR#201).

  • +
  • Haxe lexer: rewrite and support for Haxe 3 (PR#174).

  • +
  • Prolog lexer: add different kinds of numeric literals (#864).

  • +
  • F# lexer: rewrite with newest spec for F# 3.0 (#842), fix a bug with +dotted chains (#948).

  • +
  • Kotlin lexer: general update (PR#271).

  • +
  • Rebol lexer: fix comment detection and analyse_text (PR#261).

  • +
  • LLVM lexer: update keywords to v3.4 (PR#258).

  • +
  • PHP lexer: add new keywords and binary literals (PR#222).

  • +
  • external/markdown-processor.py updated to newest python-markdown (PR#221).

  • +
  • CSS lexer: some highlighting order fixes (PR#231).

  • +
  • Ceylon lexer: fix parsing of nested multiline comments (#915).

  • +
  • C family lexers: fix parsing of indented preprocessor directives (#944).

  • +
  • Rust lexer: update to 0.9 language version (PR#270, PR#388).

  • +
  • Elixir lexer: update to 0.15 language version (PR#392).

  • +
  • Fix swallowing incomplete tracebacks in Python console lexer (#874).

  • +
+
+
+

Version 1.6¶

+

(released Feb 3, 2013)

+
    +
  • Lexers added:

    +
      +
    • Dylan console (PR#149)

    • +
    • Logos (PR#150)

    • +
    • Shell sessions (PR#158)

    • +
    +
  • +
  • Fix guessed lexers not receiving lexer options (#838).

  • +
  • Fix unquoted HTML attribute lexing in Opa (#841).

  • +
  • Fixes to the Dart lexer (PR#160).

  • +
+
+
+

Version 1.6rc1¶

+

(released Jan 9, 2013)

+
    +
  • Lexers added:

    +
      +
    • AspectJ (PR#90)

    • +
    • AutoIt (PR#122)

    • +
    • BUGS-like languages (PR#89)

    • +
    • Ceylon (PR#86)

    • +
    • Croc (new name for MiniD)

    • +
    • CUDA (PR#75)

    • +
    • Dg (PR#116)

    • +
    • IDL (PR#115)

    • +
    • Jags (PR#89)

    • +
    • Julia (PR#61)

    • +
    • Kconfig (#711)

    • +
    • Lasso (PR#95, PR#113)

    • +
    • LiveScript (PR#84)

    • +
    • Monkey (PR#117)

    • +
    • Mscgen (PR#80)

    • +
    • NSIS scripts (PR#136)

    • +
    • OpenCOBOL (PR#72)

    • +
    • QML (PR#123)

    • +
    • Puppet (PR#133)

    • +
    • Racket (PR#94)

    • +
    • Rdoc (PR#99)

    • +
    • Robot Framework (PR#137)

    • +
    • RPM spec files (PR#124)

    • +
    • Rust (PR#67)

    • +
    • Smali (Dalvik assembly)

    • +
    • SourcePawn (PR#39)

    • +
    • Stan (PR#89)

    • +
    • Treetop (PR#125)

    • +
    • TypeScript (PR#114)

    • +
    • VGL (PR#12)

    • +
    • Visual FoxPro (#762)

    • +
    • Windows Registry (#819)

    • +
    • Xtend (PR#68)

    • +
    +
  • +
  • The HTML formatter now supports linking to tags using CTags files, when the +python-ctags package is installed (PR#87).

  • +
  • The HTML formatter now has a “linespans” option that wraps every line in a +<span> tag with a specific id (PR#82).

  • +
  • When deriving a lexer from another lexer with token definitions, definitions +for states not in the child lexer are now inherited. If you override a state +in the child lexer, an “inherit” keyword has been added to insert the base +state at that position (PR#141).

  • +
  • The C family lexers now inherit token definitions from a common base class, +removing code duplication (PR#141).

  • +
  • Use “colorama” on Windows for console color output (PR#142).

  • +
  • Fix Template Haskell highlighting (PR#63).

  • +
  • Fix some S/R lexer errors (PR#91).

  • +
  • Fix a bug in the Prolog lexer with names that start with ‘is’ (#810).

  • +
  • Rewrite Dylan lexer, add Dylan LID lexer (PR#147).

  • +
  • Add a Java quickstart document (PR#146).

  • +
  • Add a “external/autopygmentize” file that can be used as .lessfilter (#802).

  • +
+
+
+

Version 1.5¶

+

(codename Zeitdilatation, released Mar 10, 2012)

+
    +
  • Lexers added:

    +
      +
    • Awk (#630)

    • +
    • Fancy (#633)

    • +
    • PyPy Log

    • +
    • eC

    • +
    • Nimrod

    • +
    • Nemerle (#667)

    • +
    • F# (#353)

    • +
    • Groovy (#501)

    • +
    • PostgreSQL (#660)

    • +
    • DTD

    • +
    • Gosu (#634)

    • +
    • Octave (PR#22)

    • +
    • Standard ML (PR#14)

    • +
    • CFengine3 (#601)

    • +
    • Opa (PR#37)

    • +
    • HTTP sessions (PR#42)

    • +
    • JSON (PR#31)

    • +
    • SNOBOL (PR#30)

    • +
    • MoonScript (PR#43)

    • +
    • ECL (PR#29)

    • +
    • Urbiscript (PR#17)

    • +
    • OpenEdge ABL (PR#27)

    • +
    • SystemVerilog (PR#35)

    • +
    • Coq (#734)

    • +
    • PowerShell (#654)

    • +
    • Dart (#715)

    • +
    • Fantom (PR#36)

    • +
    • Bro (PR#5)

    • +
    • NewLISP (PR#26)

    • +
    • VHDL (PR#45)

    • +
    • Scilab (#740)

    • +
    • Elixir (PR#57)

    • +
    • Tea (PR#56)

    • +
    • Kotlin (PR#58)

    • +
    +
  • +
  • Fix Python 3 terminal highlighting with pygmentize (#691).

  • +
  • In the LaTeX formatter, escape special &, < and > chars (#648).

  • +
  • In the LaTeX formatter, fix display problems for styles with token +background colors (#670).

  • +
  • Enhancements to the Squid conf lexer (#664).

  • +
  • Several fixes to the reStructuredText lexer (#636).

  • +
  • Recognize methods in the ObjC lexer (#638).

  • +
  • Fix Lua “class” highlighting: it does not have classes (#665).

  • +
  • Fix degenerate regex in Scala lexer (#671) and highlighting bugs (#713, #708).

  • +
  • Fix number pattern order in Ocaml lexer (#647).

  • +
  • Fix generic type highlighting in ActionScript 3 (#666).

  • +
  • Fixes to the Clojure lexer (PR#9).

  • +
  • Fix degenerate regex in Nemerle lexer (#706).

  • +
  • Fix infinite looping in CoffeeScript lexer (#729).

  • +
  • Fix crashes and analysis with ObjectiveC lexer (#693, #696).

  • +
  • Add some Fortran 2003 keywords.

  • +
  • Fix Boo string regexes (#679).

  • +
  • Add “rrt” style (#727).

  • +
  • Fix infinite looping in Darcs Patch lexer.

  • +
  • Lots of misc fixes to character-eating bugs and ordering problems in many +different lexers.

  • +
+
+
+

Version 1.4¶

+

(codename Unschärfe, released Jan 03, 2011)

+
    +
  • Lexers added:

    +
      +
    • Factor (#520)

    • +
    • PostScript (#486)

    • +
    • Verilog (#491)

    • +
    • BlitzMax Basic (#478)

    • +
    • Ioke (#465)

    • +
    • Java properties, split out of the INI lexer (#445)

    • +
    • Scss (#509)

    • +
    • Duel/JBST

    • +
    • XQuery (#617)

    • +
    • Mason (#615)

    • +
    • GoodData (#609)

    • +
    • SSP (#473)

    • +
    • Autohotkey (#417)

    • +
    • Google Protocol Buffers

    • +
    • Hybris (#506)

    • +
    +
  • +
  • Do not fail in analyse_text methods (#618).

  • +
  • Performance improvements in the HTML formatter (#523).

  • +
  • With the noclasses option in the HTML formatter, some styles +present in the stylesheet were not added as inline styles.

  • +
  • Four fixes to the Lua lexer (#480, #481, #482, #497).

  • +
  • More context-sensitive Gherkin lexer with support for more i18n translations.

  • +
  • Support new OO keywords in Matlab lexer (#521).

  • +
  • Small fix in the CoffeeScript lexer (#519).

  • +
  • A bugfix for backslashes in ocaml strings (#499).

  • +
  • Fix unicode/raw docstrings in the Python lexer (#489).

  • +
  • Allow PIL to work without PIL.pth (#502).

  • +
  • Allow seconds as a unit in CSS (#496).

  • +
  • Support application/javascript as a JavaScript mime type (#504).

  • +
  • Support Offload C++ Extensions as +keywords in the C++ lexer (#484).

  • +
  • Escape more characters in LaTeX output (#505).

  • +
  • Update Haml/Sass lexers to version 3 (#509).

  • +
  • Small PHP lexer string escaping fix (#515).

  • +
  • Support comments before preprocessor directives, and unsigned/ +long long literals in C/C++ (#613, #616).

  • +
  • Support line continuations in the INI lexer (#494).

  • +
  • Fix lexing of Dylan string and char literals (#628).

  • +
  • Fix class/procedure name highlighting in VB.NET lexer (#624).

  • +
+
+
+

Version 1.3.1¶

+

(bugfix release, released Mar 05, 2010)

+
    +
  • The pygmentize script was missing from the distribution.

  • +
+
+
+

Version 1.3¶

+

(codename Schneeglöckchen, released Mar 01, 2010)

+
    +
  • Added the ensurenl lexer option, which can be used to suppress the +automatic addition of a newline to the lexer input.

  • +
  • Lexers added:

    +
      +
    • Ada

    • +
    • Coldfusion

    • +
    • Modula-2

    • +
    • Haxe

    • +
    • R console

    • +
    • Objective-J

    • +
    • Haml and Sass

    • +
    • CoffeeScript

    • +
    +
  • +
  • Enhanced reStructuredText highlighting.

  • +
  • Added support for PHP 5.3 namespaces in the PHP lexer.

  • +
  • Added a bash completion script for pygmentize, to the external/ +directory (#466).

  • +
  • Fixed a bug in do_insertions() used for multi-lexer languages.

  • +
  • Fixed a Ruby regex highlighting bug (#476).

  • +
  • Fixed regex highlighting bugs in Perl lexer (#258).

  • +
  • Add small enhancements to the C lexer (#467) and Bash lexer (#469).

  • +
  • Small fixes for the Tcl, Debian control file, Nginx config, +Smalltalk, Objective-C, Clojure, Lua lexers.

  • +
  • Gherkin lexer: Fixed single apostrophe bug and added new i18n keywords.

  • +
+
+
+

Version 1.2.2¶

+

(bugfix release, released Jan 02, 2010)

+
    +
  • Removed a backwards incompatibility in the LaTeX formatter that caused +Sphinx to produce invalid commands when writing LaTeX output (#463).

  • +
  • Fixed a forever-backtracking regex in the BashLexer (#462).

  • +
+
+
+

Version 1.2.1¶

+

(bugfix release, released Jan 02, 2010)

+
    +
  • Fixed mishandling of an ellipsis in place of the frames in a Python +console traceback, resulting in clobbered output.

  • +
+
+
+

Version 1.2¶

+

(codename Neujahr, released Jan 01, 2010)

+
    +
  • Dropped Python 2.3 compatibility.

  • +
  • Lexers added:

    +
      +
    • Asymptote

    • +
    • Go

    • +
    • Gherkin (Cucumber)

    • +
    • CMake

    • +
    • Ooc

    • +
    • Coldfusion

    • +
    • Haxe

    • +
    • R console

    • +
    +
  • +
  • Added options for rendering LaTeX in source code comments in the +LaTeX formatter (#461).

  • +
  • Updated the Logtalk lexer.

  • +
  • Added line_number_start option to image formatter (#456).

  • +
  • Added hl_lines and hl_color options to image formatter (#457).

  • +
  • Fixed the HtmlFormatter’s handling of noclasses=True to not output any +classes (#427).

  • +
  • Added the Monokai style (#453).

  • +
  • Fixed LLVM lexer identifier syntax and added new keywords (#442).

  • +
  • Fixed the PythonTracebackLexer to handle non-traceback data in header or +trailer, and support more partial tracebacks that start on line 2 (#437).

  • +
  • Fixed the CLexer to not highlight ternary statements as labels.

  • +
  • Fixed lexing of some Ruby quoting peculiarities (#460).

  • +
  • A few ASM lexer fixes (#450).

  • +
+
+
+

Version 1.1.1¶

+

(bugfix release, released Sep 15, 2009)

+
    +
  • Fixed the BBCode lexer (#435).

  • +
  • Added support for new Jinja2 keywords.

  • +
  • Fixed test suite failures.

  • +
  • Added Gentoo-specific suffixes to Bash lexer.

  • +
+
+
+

Version 1.1¶

+

(codename Brillouin, released Sep 11, 2009)

+
    +
  • Ported Pygments to Python 3. This needed a few changes in the way +encodings are handled; they may affect corner cases when used with +Python 2 as well.

  • +
  • Lexers added:

    +
      +
    • Antlr/Ragel, thanks to Ana Nelson

    • +
    • (Ba)sh shell

    • +
    • Erlang shell

    • +
    • GLSL

    • +
    • Prolog

    • +
    • Evoque

    • +
    • Modelica

    • +
    • Rebol

    • +
    • MXML

    • +
    • Cython

    • +
    • ABAP

    • +
    • ASP.net (VB/C#)

    • +
    • Vala

    • +
    • Newspeak

    • +
    +
  • +
  • Fixed the LaTeX formatter’s output so that output generated for one style +can be used with the style definitions of another (#384).

  • +
  • Added “anchorlinenos” and “noclobber_cssfile” (#396) options to HTML +formatter.

  • +
  • Support multiline strings in Lua lexer.

  • +
  • Rewrite of the JavaScript lexer by Pumbaa80 to better support regular +expression literals (#403).

  • +
  • When pygmentize is asked to highlight a file for which multiple lexers +match the filename, use the analyse_text guessing engine to determine the +winner (#355).

  • +
  • Fixed minor bugs in the JavaScript lexer (#383), the Matlab lexer (#378), +the Scala lexer (#392), the INI lexer (#391), the Clojure lexer (#387) +and the AS3 lexer (#389).

  • +
  • Fixed three Perl heredoc lexing bugs (#379, #400, #422).

  • +
  • Fixed a bug in the image formatter which misdetected lines (#380).

  • +
  • Fixed bugs lexing extended Ruby strings and regexes.

  • +
  • Fixed a bug when lexing git diffs.

  • +
  • Fixed a bug lexing the empty commit in the PHP lexer (#405).

  • +
  • Fixed a bug causing Python numbers to be mishighlighted as floats (#397).

  • +
  • Fixed a bug when backslashes are used in odd locations in Python (#395).

  • +
  • Fixed various bugs in Matlab and S-Plus lexers, thanks to Winston Chang (#410, +#411, #413, #414) and fmarc (#419).

  • +
  • Fixed a bug in Haskell single-line comment detection (#426).

  • +
  • Added new-style reStructuredText directive for docutils 0.5+ (#428).

  • +
+
+
+

Version 1.0¶

+

(codename Dreiundzwanzig, released Nov 23, 2008)

+
    +
  • Don’t use join(splitlines()) when converting newlines to \n, +because that doesn’t keep all newlines at the end when the +stripnl lexer option is False.

  • +
  • Added -N option to command-line interface to get a lexer name +for a given filename.

  • +
  • Added Tango style, written by Andre Roberge for the Crunchy project.

  • +
  • Added Python3TracebackLexer and python3 option to +PythonConsoleLexer.

  • +
  • Fixed a few bugs in the Haskell lexer.

  • +
  • Fixed PythonTracebackLexer to be able to recognize SyntaxError and +KeyboardInterrupt (#360).

  • +
  • Provide one formatter class per image format, so that surprises like:

    +
    pygmentize -f gif -o foo.gif foo.py
    +
    +
    +

    creating a PNG file are avoided.

    +
  • +
  • Actually use the font_size option of the image formatter.

  • +
  • Fixed the numpy lexer so that it no longer listens for *.py.

  • +
  • Fixed HTML formatter so that text options can be Unicode +strings (#371).

  • +
  • Unified Diff lexer supports the “udiff” alias now.

  • +
  • Fixed a few issues in Scala lexer (#367).

  • +
  • RubyConsoleLexer now supports simple prompt mode (#363).

  • +
  • JavascriptLexer is smarter about what constitutes a regex (#356).

  • +
  • Add Applescript lexer, thanks to Andreas Amann (#330).

  • +
  • Make the codetags more strict about matching words (#368).

  • +
  • NginxConfLexer is a little more accurate on mimetypes and +variables (#370).

  • +
+
+
+

Version 0.11.1¶

+

(released Aug 24, 2008)

+
    +
  • Fixed a Jython compatibility issue in pygments.unistring (#358).

  • +
+
+
+

Version 0.11¶

+

(codename Straußenei, released Aug 23, 2008)

+

Many thanks go to Tim Hatch for writing or integrating most of the bug +fixes and new features.

+
    +
  • Lexers added:

    • Nasm-style assembly language, thanks to delroth
    • YAML, thanks to Kirill Simonov
    • ActionScript 3, thanks to Pierre Bourdon
    • Cheetah/Spitfire templates, thanks to Matt Good
    • Lighttpd config files
    • Nginx config files
    • Gnuplot plotting scripts
    • Clojure
    • POV-Ray scene files
    • Sqlite3 interactive console sessions
    • Scala source files, thanks to Krzysiek Goj

  • Lexers improved:

    • C lexer highlights standard library functions now and supports C99 types.
    • Bash lexer now correctly highlights heredocs without preceding whitespace.
    • Vim lexer now highlights hex colors properly and knows a couple more keywords.
    • Irc logs lexer now handles xchat’s default time format (#340) and correctly highlights lines ending in >.
    • Support more delimiters for perl regular expressions (#258).
    • ObjectiveC lexer now supports 2.0 features.

  • Added “Visual Studio” style.
  • Updated markdown processor to Markdown 1.7.
  • Support roman/sans/mono style defs and use them in the LaTeX formatter.
  • The RawTokenFormatter is no longer registered to *.raw and it’s documented that tokenization with this lexer may raise exceptions.
  • New option hl_lines to HTML formatter, to highlight certain lines.
  • New option prestyles to HTML formatter.
  • New option -g to pygmentize, to allow lexer guessing based on filetext (can be slowish, so file extensions are still checked first).
  • guess_lexer() now makes its decision much faster due to a cache of whether data is xml-like (a check which is used in several versions of analyse_text()). Several lexers also have more accurate analyse_text() now.

Version 0.10

(codename Malzeug, released May 06, 2008)
  • Lexers added:

    • Io
    • Smalltalk
    • Darcs patches
    • Tcl
    • Matlab
    • Matlab sessions
    • FORTRAN
    • XSLT
    • tcsh
    • NumPy
    • Python 3
    • S, S-plus, R statistics languages
    • Logtalk

  • In the LatexFormatter, the commandprefix option is now by default ‘PY’ instead of ‘C’, since the latter resulted in several collisions with other packages. Also, the special meaning of the arg argument to get_style_defs() was removed.
  • Added ImageFormatter, to format code as PNG, JPG, GIF or BMP. (Needs the Python Imaging Library.)
  • Support doc comments in the PHP lexer.
  • Handle format specifications in the Perl lexer.
  • Fix comment handling in the Batch lexer.
  • Add more file name extensions for the C++, INI and XML lexers.
  • Fixes in the IRC and MuPad lexers.
  • Fix function and interface name highlighting in the Java lexer.
  • Fix at-rule handling in the CSS lexer.
  • Handle KeyboardInterrupts gracefully in pygmentize.
  • Added BlackWhiteStyle.
  • Bash lexer now correctly highlights math, does not require whitespace after semicolons, and correctly highlights boolean operators.
  • Makefile lexer is now capable of handling BSD and GNU make syntax.

Version 0.9

(codename Herbstzeitlose, released Oct 14, 2007)
  • Lexers added:

    • Erlang
    • ActionScript
    • Literate Haskell
    • Common Lisp
    • Various assembly languages
    • Gettext catalogs
    • Squid configuration
    • Debian control files
    • MySQL-style SQL
    • MOOCode

  • Lexers improved:

    • Greatly improved the Haskell and OCaml lexers.
    • Improved the Bash lexer’s handling of nested constructs.
    • The C# and Java lexers exhibited abysmal performance with some input code; this should now be fixed.
    • The IRC logs lexer is now able to colorize weechat logs too.
    • The Lua lexer now recognizes multi-line comments.
    • Fixed bugs in the D and MiniD lexer.

  • The encoding handling of the command line mode (pygmentize) was enhanced. You shouldn’t get UnicodeErrors from it anymore if you don’t give an encoding option.
  • Added a -P option to the command line mode which can be used to give options whose values contain commas or equals signs.
  • Added 256-color terminal formatter.
  • Added an experimental SVG formatter.
  • Added the lineanchors option to the HTML formatter, thanks to Ian Charnas for the idea.
  • Gave the line numbers table a CSS class in the HTML formatter.
  • Added a Vim 7-like style.

Version 0.8.1

(released Jun 27, 2007)
  • Fixed POD highlighting in the Ruby lexer.
  • Fixed Unicode class and namespace name highlighting in the C# lexer.
  • Fixed Unicode string prefix highlighting in the Python lexer.
  • Fixed a bug in the D and MiniD lexers.
  • Fixed the included MoinMoin parser.

Version 0.8

(codename Maikäfer, released May 30, 2007)
  • Lexers added:

    • Haskell, thanks to Adam Blinkinsop
    • Redcode, thanks to Adam Blinkinsop
    • D, thanks to Kirk McDonald
    • MuPad, thanks to Christopher Creutzig
    • MiniD, thanks to Jarrett Billingsley
    • Vim Script, by Tim Hatch

  • The HTML formatter now has a second line-numbers mode in which it will just integrate the numbers in the same <pre> tag as the code.
  • The CSharpLexer now is Unicode-aware, which means that it has an option that can be set so that it correctly lexes Unicode identifiers allowed by the C# specs.
  • Added a RaiseOnErrorTokenFilter that raises an exception when the lexer generates an error token, and a VisibleWhitespaceFilter that converts whitespace (spaces, tabs, newlines) into visible characters.
  • Fixed the do_insertions() helper function to yield correct indices.
  • The ReST lexer now automatically highlights source code blocks in “.. sourcecode:: language” and “.. code:: language” directive blocks.
  • Improved the default style (thanks to Tiberius Teng). The old default is still available as the “emacs” style (which was an alias before).
  • The get_style_defs method of HTML formatters now uses the cssclass option as the default selector if it was given.
  • Improved the ReST and Bash lexers a bit.
  • Fixed a few bugs in the Makefile and Bash lexers, thanks to Tim Hatch.
  • Fixed a bug in the command line code that disallowed -O options when using the -S option.
  • Fixed a bug in the RawTokenFormatter.

Version 0.7.1

(released Feb 15, 2007)
  • Fixed little highlighting bugs in the Python, Java, Scheme and Apache Config lexers.
  • Updated the included manpage.
  • Included a built version of the documentation in the source tarball.

Version 0.7

(codename Faschingskrapfn, released Feb 14, 2007)
  • Added a MoinMoin parser that uses Pygments. With it, you get Pygments highlighting in Moin Wiki pages.
  • Changed the exception raised if no suitable lexer, formatter etc. is found in one of the get_*_by_* functions to a custom exception, pygments.util.ClassNotFound. It is, however, a subclass of ValueError in order to retain backwards compatibility.
  • Added a -H command line option which can be used to get the docstring of a lexer, formatter or filter.
  • Made the handling of lexers and formatters more consistent. The aliases and filename patterns of formatters are now attributes on them.
  • Added an OCaml lexer, thanks to Adam Blinkinsop.
  • Made the HTML formatter more flexible, and easily subclassable in order to make it easy to implement custom wrappers, e.g. alternate line number markup. See the documentation.
  • Added an outencoding option to all formatters, making it possible to override the encoding (which is used by lexers and formatters) when using the command line interface. Also, if using the terminal formatter and the output file is a terminal and has an encoding attribute, use it if no encoding is given.
  • Made it possible to just drop style modules into the styles subpackage of the Pygments installation.
  • Added a “state” keyword argument to the using helper.
  • Added a commandprefix option to the LatexFormatter which allows you to control how the command names are constructed.
  • Added quite a few new lexers, thanks to Tim Hatch:

    • Java Server Pages
    • Windows batch files
    • Trac Wiki markup
    • Python tracebacks
    • ReStructuredText
    • Dylan
    • and the Befunge esoteric programming language (yay!)

  • Added Mako lexers by Ben Bangert.
  • Added “fruity” style, another dark background originally vim-based theme.
  • Added sources.list lexer by Dennis Kaarsemaker.
  • Added token stream filters, and a pygmentize option to use them.
  • Changed the behavior of the “in” operator for tokens.
  • Added mimetypes for all lexers.
  • Fixed some problems lexing Python strings.
  • Fixed tickets: #167, #178, #179, #180, #185, #201.

Version 0.6

(codename Zimtstern, released Dec 20, 2006)
  • Added option for the HTML formatter to write the CSS to an external file in “full document” mode.
  • Added RTF formatter.
  • Added Bash and Apache configuration lexers (thanks to Tim Hatch).
  • Improved guessing methods for various lexers.
  • Added @media support to CSS lexer (thanks to Tim Hatch).
  • Added a Groff lexer (thanks to Tim Hatch).
  • License change to BSD.
  • Added lexers for the Myghty template language.
  • Added a Scheme lexer (thanks to Marek Kubica).
  • Added some functions to iterate over existing lexers, formatters and filters.
  • The HtmlFormatter’s get_style_defs() can now take a list as an argument to generate CSS with multiple prefixes.
  • Support for guessing input encoding added.
  • Encoding support added: all processing is now done with Unicode strings, input and output are converted from and optionally to byte strings (see the encoding option of lexers and formatters).
  • Some improvements in the C(++) lexers handling comments and line continuations.

Version 0.5.1

(released Oct 30, 2006)

  • Fixed traceback in pygmentize -L (thanks to Piotr Ozarowski).

Version 0.5

(codename PyKleur, released Oct 30, 2006)

  • Initial public release.
diff --git a/doc/_build/html/docs/cmdline.html b/doc/_build/html/docs/cmdline.html
new file mode 100644

Command Line Interface

You can use Pygments from the shell, provided you installed the pygmentize script:

$ pygmentize test.py
print "Hello World"

will print the file test.py to standard output, using the Python lexer (inferred from the file name extension) and the terminal formatter (because you didn’t give an explicit formatter name).

If you want HTML output:

$ pygmentize -f html -l python -o test.html test.py

As you can see, the -l option explicitly selects a lexer. As seen above, if you give an input file name and it has an extension that Pygments recognizes, you can omit this option.

The -o option gives an output file name. If it is not given, output is written to stdout.

The -f option selects a formatter (as with -l, it can also be omitted if an output file name is given and has a supported extension). If no output file name is given and -f is omitted, the TerminalFormatter is used.

The above command could therefore also be given as:

$ pygmentize -o test.html test.py

To create a full HTML document, including line numbers and stylesheet (using the “emacs” style), highlighting the Python file test.py to test.html:

$ pygmentize -O full,style=emacs -o test.html test.py

Options and filters

Lexer and formatter options can be given using the -O option:

$ pygmentize -f html -O style=colorful,linenos=1 -l python test.py

Be sure to enclose the option string in quotes if it contains any special shell characters, such as spaces or expansion wildcards like *. If an option expects a list value, separate the list entries with spaces (you’ll have to quote the option value in this case too, so that the shell doesn’t split it).

Since the -O option argument is split at commas and expects the split values to be of the form name=value, you can’t give an option value that contains commas or equals signs. Therefore, an option -P is provided (as of Pygments 0.9) that works like -O but can only pass one option per -P. Its value can then contain all characters:

$ pygmentize -P "heading=Pygments, the Python highlighter" ...

Filters are added to the token stream using the -F option:

$ pygmentize -f html -l pascal -F keywordcase:case=upper main.pas

As you see, options for the filter are given after a colon. As for -O, the filter name and options must be one shell word, so there may not be any spaces around the colon.

Generating styles

Formatters normally don’t output full style information. For example, the HTML formatter by default only outputs <span> tags with class attributes. Therefore, there’s a special -S option for generating style definitions. Usage is as follows:

$ pygmentize -f html -S colorful -a .syntax

generates a CSS style sheet (because you selected the HTML formatter) for the “colorful” style, prepending a “.syntax” selector to all style rules.

For an explanation of what -a means for a particular formatter, look at the arg argument of the formatter’s get_style_defs() method.
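
The same style sheet can also be produced from the library API; a minimal sketch (the .syntax selector is just an example value):

from pygments.formatters import HtmlFormatter

# equivalent to: pygmentize -f html -S colorful -a .syntax
print(HtmlFormatter(style='colorful').get_style_defs('.syntax'))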


Getting lexer names

New in version 1.0.

The -N option guesses a lexer name for a given filename, so that

$ pygmentize -N setup.py

will print out python. It won’t highlight anything yet. If no specific lexer is known for that filename, text is printed.

Custom Lexers and Formatters

New in version 2.2.

The -x flag enables custom lexers and formatters to be loaded from files relative to the current directory. Create a file with a class named CustomLexer or CustomFormatter, then specify it on the command line:

$ pygmentize -l your_lexer.py -f your_formatter.py -x

You can also specify the name of your class with a colon:

$ pygmentize -l your_lexer.py:SomeLexer -x

For more information, see the Pygments documentation on Lexer development.

Getting help

The -L option lists lexers, formatters, styles and filters, along with their short names and supported file name extensions. If you want to see only one category, give it as an argument:

$ pygmentize -L filters

will list only the installed filters.

The -H option will give you detailed information (the same that can be found in this documentation) about a lexer, formatter or filter. Usage is as follows:

$ pygmentize -H formatter html

will print the help for the HTML formatter, while

$ pygmentize -H lexer python

will print the help for the Python lexer, etc.

A note on encodings

New in version 0.9.

Pygments tries to be smart regarding encodings in the formatting process:

  • If you give an encoding option, it will be used as the input and output encoding.
  • If you give an outencoding option, it will override encoding as the output encoding.
  • If you give an inencoding option, it will override encoding as the input encoding.
  • If you don’t give an encoding and have given an output file, the default encoding for lexer and formatter is the terminal encoding or the default locale encoding of the system. As a last resort, latin1 is used (which will pass through all non-ASCII characters).
  • If you don’t give an encoding and haven’t given an output file (that means output is written to the console), the default encoding for lexer and formatter is the terminal encoding (sys.stdout.encoding).
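
The same options exist on the library side. A small sketch of the effect (the sample code string is arbitrary): with an encoding set on the formatter, highlight() returns an encoded byte string; without one, it returns a Unicode string.

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

code = 'print("hi")\n'
# encoding given: the result is a byte string in that encoding
html_bytes = highlight(code, PythonLexer(), HtmlFormatter(encoding='utf-8'))
# no encoding given: the result is a Unicode string
html_text = highlight(code, PythonLexer(), HtmlFormatter())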

diff --git a/doc/_build/html/docs/filterdevelopment.html b/doc/_build/html/docs/filterdevelopment.html
new file mode 100644

Write your own filter

New in version 0.7.

Writing your own filters is very easy. All you have to do is subclass the Filter class and override its filter method. Additionally, a filter is instantiated with some keyword arguments that you can use to adjust the behavior of your filter.

Subclassing Filters

As an example, we write a filter that converts all Name.Function tokens to normal Name tokens to make the output less colorful.

from pygments.util import get_bool_opt
from pygments.token import Name
from pygments.filter import Filter

class UncolorFilter(Filter):

    def __init__(self, **options):
        Filter.__init__(self, **options)
        self.class_too = get_bool_opt(options, 'classtoo')

    def filter(self, lexer, stream):
        for ttype, value in stream:
            if ttype is Name.Function or (self.class_too and
                                          ttype is Name.Class):
                ttype = Name
            yield ttype, value

A note on the lexer argument: it can be quite confusing, since it doesn’t need to be a lexer instance. If a filter was added by using the add_filter() method of lexers, that lexer is registered for the filter; in that case, lexer will refer to the lexer that has registered the filter, and it can be used to access options passed to that lexer. Because it could also be None, you always have to check for that case before accessing it, as the sketch below shows.
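
A defensive sketch of such a check (the tabsize lookup is only an illustrative use of lexer options, not part of the original example):

from pygments.filter import Filter

class LexerAwareFilter(Filter):

    def filter(self, lexer, stream):
        # lexer is None unless the filter was attached via add_filter()
        tabsize = lexer.options.get('tabsize', 8) if lexer is not None else 8
        for ttype, value in stream:
            yield ttype, value.expandtabs(tabsize)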


Using a decorator

You can also use the simplefilter decorator from the pygments.filter module:

from pygments.util import get_bool_opt
from pygments.token import Name
from pygments.filter import simplefilter


@simplefilter
def uncolor(self, lexer, stream, options):
    class_too = get_bool_opt(options, 'classtoo')
    for ttype, value in stream:
        if ttype is Name.Function or (class_too and
                                      ttype is Name.Class):
            ttype = Name
        yield ttype, value

The decorator automatically subclasses an internal filter class and uses the +decorated function as a method for filtering. (That’s why there is a self +argument that you probably won’t end up using in the method.)

diff --git a/doc/_build/html/docs/filters.html b/doc/_build/html/docs/filters.html
new file mode 100644

Filters

New in version 0.7.

You can filter token streams coming from lexers to improve or annotate the output. For example, you can highlight special words in comments, convert keywords to upper- or lowercase to enforce a style guide, etc.

To apply a filter, you can use the add_filter() method of a lexer:

>>> from pygments.lexers import PythonLexer
>>> l = PythonLexer()
>>> # add a filter given by a string and options
>>> l.add_filter('codetagify', case='lower')
>>> l.filters
[<pygments.filters.CodeTagFilter object at 0xb785decc>]
>>> from pygments.filters import KeywordCaseFilter
>>> # or give an instance
>>> l.add_filter(KeywordCaseFilter(case='lower'))

The add_filter() method takes keyword arguments which are forwarded to the constructor of the filter.

To get a list of all registered filters by name, you can use the get_all_filters() function from the pygments.filters module, which returns an iterable over all known filter names.
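
For instance, a quick way to see what is available (the exact names depend on your Pygments version):

>>> from pygments.filters import get_all_filters
>>> sorted(get_all_filters())
['codetagify', 'gobble', 'highlight', 'keywordcase', 'raiseonerror', 'tokenmerge', 'whitespace']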


If you want to write your own filter, have a look at Write your own filter.

Builtin Filters

class CodeTagFilter

Name: codetagify

Highlight special code tags in comments and docstrings.

Options accepted:

codetags : list of strings
    A list of strings that are flagged as code tags. The default is to highlight XXX, TODO, BUG and NOTE.
class KeywordCaseFilter

Name: keywordcase

Convert keywords to lowercase or uppercase or capitalize them, which means first letter uppercase, rest lowercase.

This can be useful e.g. if you highlight Pascal code and want to adapt the code to your styleguide.

Options accepted:

case : string
    The casing to convert keywords to. Must be one of 'lower', 'upper' or 'capitalize'. The default is 'lower'.
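
For example, to uppercase keywords while leaving everything else untouched (a small sketch; the Pascal snippet is arbitrary):

>>> from pygments import highlight
>>> from pygments.lexers import DelphiLexer
>>> from pygments.formatters import NullFormatter
>>> lexer = DelphiLexer()
>>> lexer.add_filter('keywordcase', case='upper')
>>> print(highlight('begin writeln; end.', lexer, NullFormatter()))
>>> # keywords like 'begin' and 'end' now appear uppercased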

class NameHighlightFilter

Name: highlight

Highlight a normal Name (and Name.*) token with a different token type.

Example:

filter = NameHighlightFilter(
    names=['foo', 'bar', 'baz'],
    tokentype=Name.Function,
)

This would highlight the names “foo”, “bar” and “baz” as functions. Name.Function is the default token type.

Options accepted:

names : list of strings
    A list of names that should be given the different token type. There is no default.
tokentype : TokenType or string
    A token type or a string containing a token type name that is used for highlighting the strings in names. The default is Name.Function.
class RaiseOnErrorTokenFilter

Name: raiseonerror

Raise an exception when the lexer generates an error token.

Options accepted:

excclass : Exception class
    The exception class to raise. The default is pygments.filters.ErrorToken.

New in version 0.8.
class VisibleWhitespaceFilter

Name: whitespace

Convert tabs, newlines and/or spaces to visible characters.

Options accepted:

spaces : string or bool
    If this is a one-character string, spaces will be replaced by this string. If it is another true value, spaces will be replaced by · (unicode MIDDLE DOT). If it is a false value, spaces will not be replaced. The default is False.
tabs : string or bool
    The same as for spaces, but the default replacement character is » (unicode RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK). The default value is False. Note: this will not work if the tabsize option for the lexer is nonzero, as tabs will already have been expanded then.
tabsize : int
    If tabs are to be replaced by this filter (see the tabs option), this is the total number of characters that a tab should be expanded to. The default is 8.
newlines : string or bool
    The same as for spaces, but the default replacement character is ¶ (unicode PILCROW SIGN). The default value is False.
wstokentype : bool
    If true, give whitespace the special Whitespace token type. This allows styling the visible whitespace differently (e.g. greyed out), but it can disrupt background colors. The default is True.

New in version 0.8.
class GobbleFilter

Name: gobble

Gobbles source code lines (eats initial characters).

This filter drops the first n characters off every line of code. This may be useful when the source code fed to the lexer is indented by a fixed amount of space that isn’t desired in the output.

Options accepted:

n : int
    The number of characters to gobble.

New in version 1.2.
class TokenMergeFilter

Name: tokenmerge

Merges consecutive tokens with the same token type in the output stream of a lexer.

New in version 1.2.
diff --git a/doc/_build/html/docs/formatterdevelopment.html b/doc/_build/html/docs/formatterdevelopment.html
new file mode 100644

Write your own formatter

As well as creating your own lexer, writing a new formatter for Pygments is easy and straightforward.

A formatter is a class that is initialized with some keyword arguments (the formatter options) and that must provide a format() method. Additionally, a formatter should provide a get_style_defs() method that returns the style definitions from the style in a form usable for the formatter’s output format.

Quickstart

The most basic formatter shipped with Pygments is the NullFormatter. It just sends the value of a token to the output stream:

from pygments.formatter import Formatter

class NullFormatter(Formatter):
    def format(self, tokensource, outfile):
        for ttype, value in tokensource:
            outfile.write(value)

As you can see, the format() method is passed two parameters: tokensource and outfile. The first is an iterable of (token_type, value) tuples, the latter a file-like object with a write() method.

Because this formatter is so basic, it doesn’t override the get_style_defs() method.

Styles

Styles aren’t instantiated, but their metaclass provides some class functions so that you can access the style definitions easily.

Styles are iterable and yield tuples in the form (ttype, d) where ttype is a token and d is a dict with the following keys:

'color'
    Hexadecimal color value (eg: 'ff0000' for red) or None if not defined.
'bold'
    True if the value should be bold.
'italic'
    True if the value should be italic.
'underline'
    True if the value should be underlined.
'bgcolor'
    Hexadecimal color value for the background (eg: 'eeeeee' for light gray) or None if not defined.
'border'
    Hexadecimal color value for the border (eg: '0000aa' for a dark blue) or None for no border.

Additional keys might appear in the future; formatters should ignore all keys they don’t support.
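
As an illustration, this small sketch dumps the parsed style information for a couple of token types from the builtin 'default' style (the chosen token types are arbitrary):

from pygments.styles import get_style_by_name
from pygments.token import Keyword, String

style = get_style_by_name('default')
for ttype, d in style:
    if ttype in (Keyword, String):
        # d carries the keys described above ('color', 'bold', ...)
        print(ttype, d['color'], d['bold'])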

+

HTML 3.2 Formatter¶

+

For an more complex example, let’s implement a HTML 3.2 Formatter. We don’t +use CSS but inline markup (<u>, <font>, etc). Because this isn’t good +style this formatter isn’t in the standard library ;-)

+
from pygments.formatter import Formatter
+
+class OldHtmlFormatter(Formatter):
+
+    def __init__(self, **options):
+        Formatter.__init__(self, **options)
+
+        # create a dict of (start, end) tuples that wrap the
+        # value of a token so that we can use it in the format
+        # method later
+        self.styles = {}
+
+        # we iterate over the `_styles` attribute of a style item
+        # that contains the parsed style values.
+        for token, style in self.style:
+            start = end = ''
+            # a style item is a tuple in the following form:
+            # colors are readily specified in hex: 'RRGGBB'
+            if style['color']:
+                start += '<font color="#%s">' % style['color']
+                end = '</font>' + end
+            if style['bold']:
+                start += '<b>'
+                end = '</b>' + end
+            if style['italic']:
+                start += '<i>'
+                end = '</i>' + end
+            if style['underline']:
+                start += '<u>'
+                end = '</u>' + end
+            self.styles[token] = (start, end)
+
+    def format(self, tokensource, outfile):
+        # lastval is a string we use for caching
+        # because it's possible that an lexer yields a number
+        # of consecutive tokens with the same token type.
+        # to minimize the size of the generated html markup we
+        # try to join the values of same-type tokens here
+        lastval = ''
+        lasttype = None
+
+        # wrap the whole output with <pre>
+        outfile.write('<pre>')
+
+        for ttype, value in tokensource:
+            # if the token type doesn't exist in the stylemap
+            # we try it with the parent of the token type
+            # eg: parent of Token.Literal.String.Double is
+            # Token.Literal.String
+            while ttype not in self.styles:
+                ttype = ttype.parent
+            if ttype == lasttype:
+                # the current token type is the same of the last
+                # iteration. cache it
+                lastval += value
+            else:
+                # not the same token as last iteration, but we
+                # have some data in the buffer. wrap it with the
+                # defined style and write it to the output file
+                if lastval:
+                    stylebegin, styleend = self.styles[lasttype]
+                    outfile.write(stylebegin + lastval + styleend)
+                # set lastval/lasttype to current values
+                lastval = value
+                lasttype = ttype
+
+        # if something is left in the buffer, write it to the
+        # output file, then close the opened <pre> tag
+        if lastval:
+            stylebegin, styleend = self.styles[lasttype]
+            outfile.write(stylebegin + lastval + styleend)
+        outfile.write('</pre>\n')
+
+
+

The comments should explain it. Again, this formatter doesn’t override the get_style_defs() method. If we had used CSS classes instead of inline HTML markup, we would need to generate the CSS first; for that purpose the get_style_defs() method exists.
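
For illustration only, here is what a hypothetical CSS-emitting cousin of the formatter above might return; the class-name scheme below is made up, not Pygments’ real one:

def get_style_defs(self, arg=''):
    # build one CSS rule per token type from the parsed style values;
    # str(token) is only a stand-in for a real class-name scheme
    lines = []
    for token, style in self.style:
        rule = []
        if style['color']:
            rule.append('color: #%s' % style['color'])
        if style['bold']:
            rule.append('font-weight: bold')
        if rule:
            lines.append('%s .%s { %s }' % (arg, str(token), '; '.join(rule)))
    return '\n'.join(lines)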

Generating Style Definitions

Some formatters like the LatexFormatter and the HtmlFormatter don’t output inline markup but reference either macros or css classes. Because the definitions of those are not part of the output, the get_style_defs() method exists. It is passed one parameter (if it’s used and how it’s used is up to the formatter) and has to return a string or None.
diff --git a/doc/_build/html/docs/formatters.html b/doc/_build/html/docs/formatters.html
new file mode 100644

Available formatters

This page lists all builtin formatters.

Common options

All formatters support these options:

encoding
    If given, must be an encoding name (such as "utf-8"). This will be used to convert the token strings (which are Unicode strings) to byte strings in the output (default: None). It will also be written in an encoding declaration suitable for the document format if the full option is given (e.g. a meta content-type directive in HTML or an invocation of the inputenc package in LaTeX).

    If this is "" or None, Unicode strings will be written to the output file, which most file-like objects do not support. For example, pygments.highlight() will return a Unicode string if called with no outfile argument and a formatter that has encoding set to None because it uses a StringIO.StringIO object that supports Unicode arguments to write(). Using a regular file object wouldn’t work.

    New in version 0.6.

outencoding
    When using Pygments from the command line, any encoding option given is passed to the lexer and the formatter. This is sometimes not desirable, for example if you want to set the input encoding to "guess". Therefore, outencoding has been introduced which overrides encoding for the formatter if given.

    New in version 0.7.

Formatter classes

All these classes are importable from pygments.formatters.
class BBCodeFormatter

Short names: bbcode, bb
Filenames: None

Format tokens with BBcodes. These formatting codes are used by many bulletin boards, so you can highlight your sourcecode with pygments before posting it there.

This formatter has no support for background colors and borders, as there are no common BBcode tags for that.

Some board systems (e.g. phpBB) don’t support colors in their [code] tag, so you can’t use the highlighting together with that tag. Text in a [code] tag usually is shown with a monospace font (which this formatter can do with the monofont option), and spaces (which you need for indentation) are not removed.

Additional options accepted:

style
    The style to use, can be a string or a Style subclass (default: 'default').
codetag
    If set to true, put the output into [code] tags (default: false).
monofont
    If set to true, add a tag to show the code with a monospace font (default: false).
class BmpImageFormatter

Short names: bmp, bitmap
Filenames: *.bmp

Create a bitmap image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.

New in version 1.0.
class GifImageFormatter

Short names: gif
Filenames: *.gif

Create a GIF image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.

New in version 1.0.
class HtmlFormatter

Short names: html
Filenames: *.html, *.htm

Format tokens as HTML 4 <span> tags within a <pre> tag, wrapped in a <div> tag. The <div>’s CSS class can be set by the cssclass option.

If the linenos option is set to "table", the <pre> is additionally wrapped inside a <table> which has one row and two cells: one containing the line numbers and one containing the code. Example:

<div class="highlight" >
<table><tr>
  <td class="linenos" title="click to toggle"
    onclick="with (this.firstChild.style)
             { display = (display == '') ? 'none' : '' }">
    <pre>1
    2</pre>
  </td>
  <td class="code">
    <pre><span class="Ke">def </span><span class="NaFu">foo</span>(bar):
      <span class="Ke">pass</span>
    </pre>
  </td>
</tr></table></div>

(whitespace added to improve clarity).

Wrapping can be disabled using the nowrap option.

A list of lines can be specified using the hl_lines option to make these lines highlighted (as of Pygments 0.11).

With the full option, a complete HTML 4 document is output, including the style definitions inside a <style> tag, or in a separate file if the cssfile option is given.

When tagsfile is set to the path of a ctags index file, it is used to generate hyperlinks from names to their definition. You must enable lineanchors and run ctags with the -n option for this to work. The python-ctags module from PyPI must be installed to use this feature; otherwise a RuntimeError will be raised.

The get_style_defs(arg='') method of a HtmlFormatter returns a string containing CSS rules for the CSS classes used by the formatter. The argument arg can be used to specify additional CSS selectors that are prepended to the classes. A call fmter.get_style_defs('td .code') would result in the following CSS classes:

td .code .kw { font-weight: bold; color: #00FF00 }
td .code .cm { color: #999999 }
...

If you have Pygments 0.6 or higher, you can also pass a list or tuple to the get_style_defs() method to request multiple prefixes for the tokens:

formatter.get_style_defs(['div.syntax pre', 'pre.syntax'])

The output would then look like this:

div.syntax pre .kw,
pre.syntax .kw { font-weight: bold; color: #00FF00 }
div.syntax pre .cm,
pre.syntax .cm { color: #999999 }
...

Additional options accepted:

nowrap
    If set to True, don’t wrap the tokens at all, not even inside a <pre> tag. This disables most other options (default: False).
full
    Tells the formatter to output a “full” document, i.e. a complete self-contained document (default: False).
title
    If full is true, the title that should be used to caption the document (default: '').
style
    The style to use, can be a string or a Style subclass (default: 'default'). This option has no effect if the cssfile and noclobber_cssfile option are given and the file specified in cssfile exists.
noclasses
    If set to true, token <span> tags will not use CSS classes, but inline styles. This is not recommended for larger pieces of code since it increases output size by quite a bit (default: False).
classprefix
    Since the token types use relatively short class names, they may clash with some of your own class names. In this case you can use the classprefix option to give a string to prepend to all Pygments-generated CSS class names for token types. Note that this option also affects the output of get_style_defs().
cssclass
    CSS class for the wrapping <div> tag (default: 'highlight'). If you set this option, the default selector for get_style_defs() will be this class.

    New in version 0.9: If you select the 'table' line numbers, the wrapping table will have a CSS class of this string plus 'table', the default is accordingly 'highlighttable'.
cssstyles
    Inline CSS styles for the wrapping <div> tag (default: '').
prestyles
    Inline CSS styles for the <pre> tag (default: '').

    New in version 0.11.
cssfile
    If the full option is true and this option is given, it must be the name of an external file. If the filename does not include an absolute path, the file’s path will be assumed to be relative to the main output file’s path, if the latter can be found. The stylesheet is then written to this file instead of the HTML file.

    New in version 0.6.
noclobber_cssfile
    If cssfile is given and the specified file exists, the css file will not be overwritten. This allows the use of the full option in combination with a user specified css file. Default is False.

    New in version 1.1.
linenos
    If set to 'table', output line numbers as a table with two cells, one containing the line numbers, the other the whole code. This is copy-and-paste-friendly, but may cause alignment problems with some browsers or fonts. If set to 'inline', the line numbers will be integrated in the <pre> tag that contains the code (that setting is new in Pygments 0.8).

    For compatibility with Pygments 0.7 and earlier, every true value except 'inline' means the same as 'table' (in particular, that means also True).

    The default value is False, which means no line numbers at all.

    Note: with the default (“table”) line number mechanism, the line numbers and code can have different line heights in Internet Explorer unless you give the enclosing <pre> tags an explicit line-height CSS property (you get the default line spacing with line-height: 125%).
hl_lines
    Specify a list of lines to be highlighted.

    New in version 0.11.
linenostart
    The line number for the first line (default: 1).
linenostep
    If set to a number n > 1, only every nth line number is printed.
linenospecial
    If set to a number n > 0, every nth line number is given the CSS class "special" (default: 0).
nobackground
    If set to True, the formatter won’t output the background color for the wrapping element (this automatically defaults to False when there is no wrapping element [eg: no argument for the get_syntax_defs method given]) (default: False).

    New in version 0.6.
lineseparator
    This string is output between lines of code. It defaults to "\n", which is enough to break a line inside <pre> tags, but you can e.g. set it to "<br>" to get HTML line breaks.

    New in version 0.7.
lineanchors
    If set to a nonempty string, e.g. foo, the formatter will wrap each output line in an anchor tag with a name of foo-linenumber. This allows easy linking to certain lines.

    New in version 0.9.
linespans
    If set to a nonempty string, e.g. foo, the formatter will wrap each output line in a span tag with an id of foo-linenumber. This allows easy access to lines via javascript.

    New in version 1.6.
anchorlinenos
    If set to True, will wrap line numbers in <a> tags. Used in combination with linenos and lineanchors.
tagsfile
    If set to the path of a ctags file, wrap names in anchor tags that link to their definitions. lineanchors should be used, and the tags file should specify line numbers (see the -n option to ctags).

    New in version 1.6.
tagurlformat
    A string formatting pattern used to generate links to ctags definitions. Available variables are %(path)s, %(fname)s and %(fext)s. Defaults to an empty string, resulting in just #prefix-number links.

    New in version 1.6.
filename
    A string used to generate a filename when rendering <pre> blocks, for example if displaying source code.

    New in version 2.1.
wrapcode
    Wrap the code inside <pre> blocks using <code>, as recommended by the HTML5 specification.

    New in version 2.4.

Subclassing the HTML formatter

+
+

New in version 0.7.

+
+

The HTML formatter is now built in a way that allows easy subclassing, thus +customizing the output HTML code. The format() method calls +self._format_lines() which returns a generator that yields tuples of (1, +line), where the 1 indicates that the line is a line of the +formatted source code.

+

If the nowrap option is set, the generator is then iterated over and the resulting HTML is output.

+

Otherwise, format() calls self.wrap(), which wraps the generator with +other generators. These may add some HTML code to the one generated by +_format_lines(), either by modifying the lines generated by the latter, +then yielding them again with (1, line), and/or by yielding other HTML +code before or after the lines, with (0, html). The distinction between +source lines and other code makes it possible to wrap the generator multiple +times.

+

The default wrap() implementation adds a <div> and a <pre> tag.

+

A custom HtmlFormatter subclass could look like this:

class CodeHtmlFormatter(HtmlFormatter):

    def wrap(self, source, outfile):
        return self._wrap_code(source)

    def _wrap_code(self, source):
        yield 0, '<code>'
        for i, t in source:
            if i == 1:
                # it's a line of formatted code
                t += '<br>'
            yield i, t
        yield 0, '</code>'
+

This results in wrapping the formatted lines with a <code> tag, where the +source lines are broken using <br> tags.

+

After calling wrap(), the format() method also adds the “line numbers” +and/or “full document” wrappers if the respective options are set. Then, all +HTML yielded by the wrapped generator is output.

+
class IRCFormatter

Short names: irc, IRC
Filenames: None

Format tokens with IRC color sequences.

The get_style_defs() method doesn’t do anything special since there is no support for common styles.

Options accepted:

bg
    Set to "light" or "dark" depending on the terminal’s background (default: "light").
colorscheme
    A dictionary mapping token types to (lightbg, darkbg) color names or None (default: None = use builtin colorscheme).
linenos
    Set to True to have line numbers in the output as well (default: False = no line numbers).
class ImageFormatter

Short names: img, IMG, png
Filenames: *.png

Create a PNG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.

New in version 0.10.

Additional options accepted:

image_format
    An image format to output that is recognised by PIL; these include:

    • “PNG” (default)
    • “JPEG”
    • “BMP”
    • “GIF”

line_pad
    The extra spacing (in pixels) between each line of text.
    Default: 2
font_name
    The font name to be used as the base font from which others, such as bold and italic fonts will be generated. This really should be a monospace font to look sane.
    Default: “Courier New” on Windows, “Menlo” on Mac OS, and “DejaVu Sans Mono” on *nix
font_size
    The font size in points to be used.
    Default: 14
image_pad
    The padding, in pixels, to be used at each edge of the resulting image.
    Default: 10
line_numbers
    Whether line numbers should be shown: True/False
    Default: True
line_number_start
    The line number of the first line.
    Default: 1
line_number_step
    The step used when printing line numbers.
    Default: 1
line_number_bg
    The background colour (in “#123456” format) of the line number bar, or None to use the style background color.
    Default: “#eed”
line_number_fg
    The text color of the line numbers (in “#123456”-like format).
    Default: “#886”
line_number_chars
    The number of columns of line numbers allowable in the line number margin.
    Default: 2
line_number_bold
    Whether line numbers will be bold: True/False
    Default: False
line_number_italic
    Whether line numbers will be italicized: True/False
    Default: False
line_number_separator
    Whether a line will be drawn between the line number area and the source code area: True/False
    Default: True
line_number_pad
    The horizontal padding (in pixels) between the line number margin and the source code area.
    Default: 6
hl_lines
    Specify a list of lines to be highlighted.
    New in version 1.2.
    Default: empty list
hl_color
    Specify the color for highlighting lines.
    New in version 1.2.
    Default: highlight color of the selected style
class JpgImageFormatter

Short names: jpg, jpeg
Filenames: *.jpg

Create a JPEG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.

New in version 1.0.
class LatexFormatter

Short names: latex, tex
Filenames: *.tex

Format tokens as LaTeX code. This needs the fancyvrb and color standard packages.

Without the full option, code is formatted as one Verbatim environment, like this:

\begin{Verbatim}[commandchars=\\\{\}]
\PY{k}{def }\PY{n+nf}{foo}(\PY{n}{bar}):
    \PY{k}{pass}
\end{Verbatim}

The special command used here (\PY) and all the other macros it needs are output by the get_style_defs method.

With the full option, a complete LaTeX document is output, including the command definitions in the preamble.

The get_style_defs() method of a LatexFormatter returns a string containing \def commands defining the macros needed inside the Verbatim environments.

Additional options accepted:

style
    The style to use, can be a string or a Style subclass (default: 'default').
full
    Tells the formatter to output a “full” document, i.e. a complete self-contained document (default: False).
title
    If full is true, the title that should be used to caption the document (default: '').
docclass
    If the full option is enabled, this is the document class to use (default: 'article').
preamble
    If the full option is enabled, this can be further preamble commands, e.g. \usepackage (default: '').
linenos
    If set to True, output line numbers (default: False).
linenostart
    The line number for the first line (default: 1).
linenostep
    If set to a number n > 1, only every nth line number is printed.
verboptions
    Additional options given to the Verbatim environment (see the fancyvrb docs for possible values) (default: '').
commandprefix
    The LaTeX commands used to produce colored output are constructed using this prefix and some letters (default: 'PY').

    New in version 0.7.

    Changed in version 0.10: The default is now 'PY' instead of 'C'.
texcomments
    If set to True, enables LaTeX comment lines. That is, LaTeX markup in comment tokens is not escaped so that LaTeX can render it (default: False).

    New in version 1.2.
mathescape
    If set to True, enables LaTeX math mode escape in comments. That is, '$...$' inside a comment will trigger math mode (default: False).

    New in version 1.2.
escapeinside
    If set to a string of length 2, enables escaping to LaTeX. Text delimited by these 2 characters is read as LaTeX code and typeset accordingly. It has no effect in string literals. It has no effect in comments if texcomments or mathescape is set. (default: '').

    New in version 2.0.
envname
    Allows you to pick an alternative environment name replacing Verbatim. The alternate environment still has to support Verbatim’s option syntax. (default: 'Verbatim').

    New in version 2.0.
class NullFormatter

Short names: text, null
Filenames: *.txt

Output the text unchanged without any formatting.
class RawTokenFormatter

Short names: raw, tokens
Filenames: *.raw

Format tokens as a raw representation for storing token streams.

The format is tokentype<TAB>repr(tokenstring)\n. The output can later be converted to a token stream with the RawTokenLexer, described in the lexer list.
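
A small round-trip sketch (the input snippet is arbitrary; note that this formatter always returns encoded bytes):

>>> from pygments import highlight
>>> from pygments.lexers import PythonLexer, RawTokenLexer
>>> from pygments.formatters import RawTokenFormatter
>>> raw = highlight('x = 1\n', PythonLexer(), RawTokenFormatter())
>>> # each output line has the form: Token.Name<TAB>'x'
>>> tokens = list(RawTokenLexer().get_tokens(raw.decode('ascii')))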

Only two options are accepted:

compress
    If set to 'gz' or 'bz2', compress the output with the given compression algorithm after encoding (default: '').
error_color
    If set to a color name, highlight error tokens using that color. If set but with no value, defaults to 'red'.

    New in version 0.11.
class RtfFormatter

Short names: rtf
Filenames: *.rtf

Format tokens as RTF markup. This formatter automatically outputs full RTF documents with color information and other useful stuff. Perfect for Copy and Paste into Microsoft(R) Word(R) documents.

Please note that encoding and outencoding options are ignored. The RTF format is ASCII natively, but handles unicode characters correctly thanks to escape sequences.

New in version 0.6.

Additional options accepted:

style
    The style to use, can be a string or a Style subclass (default: 'default').
fontface
    The used font family, for example Bitstream Vera Sans. Defaults to some generic font which is supposed to have fixed width.
fontsize
    Size of the font used. Size is specified in half points. The default is 24 half-points, giving a size 12 font.

    New in version 2.0.
class SvgFormatter

Short names: svg
Filenames: *.svg

Format tokens as an SVG graphics file. This formatter is still experimental. Each line of code is a <text> element with explicit x and y coordinates containing <tspan> elements with the individual token styles.

By default, this formatter outputs a full SVG document including doctype declaration and the <svg> root element.

New in version 0.9.

Additional options accepted:

nowrap
    Don’t wrap the SVG <text> elements in <svg><g> elements and don’t add an XML declaration and a doctype. If true, the fontfamily and fontsize options are ignored. Defaults to False.
fontfamily
    The value to give the wrapping <g> element’s font-family attribute, defaults to "monospace".
fontsize
    The value to give the wrapping <g> element’s font-size attribute, defaults to "14px".
xoffset
    Starting offset in X direction, defaults to 0.
yoffset
    Starting offset in Y direction, defaults to the font size if it is given in pixels, or 20 otherwise. (This is necessary since text coordinates refer to the text baseline, not the top edge.)
ystep
    Offset to add to the Y coordinate for each subsequent line. This should roughly be the text size plus 5. It defaults to that value if the text size is given in pixels, or 25 otherwise.
spacehack
    Convert spaces in the source to &#160;, which are non-breaking spaces. SVG provides the xml:space attribute to control how whitespace inside tags is handled; in theory, the preserve value could be used to keep all whitespace as-is. However, many current SVG viewers don’t obey that rule, so this option is provided as a workaround and defaults to True.
class Terminal256Formatter

Short names: terminal256, console256, 256
Filenames: None

Format tokens with ANSI color sequences, for output in a 256-color terminal or console. Like in TerminalFormatter, color sequences are terminated at newlines, so that paging the output works correctly.

The formatter takes colors from a style defined by the style option and converts them to the nearest ANSI 256-color escape sequences. Bold and underline attributes from the style are preserved (and displayed).
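
For instance, a minimal library-side use (the style name is arbitrary):

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import Terminal256Formatter

code = 'print("Hello World")\n'
# prints ANSI-colored output suitable for a 256-color terminal
print(highlight(code, PythonLexer(), Terminal256Formatter(style='default')))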

New in version 0.9.

Changed in version 2.2: If the used style defines foreground colors in the form #ansi*, then Terminal256Formatter will map these to non extended foreground color. See Terminal Styles for more information.

Changed in version 2.4: The ANSI color names have been updated with names that are easier to understand and align with colornames of other projects and terminals. See this table for more information.

Options accepted:

style
    The style to use, can be a string or a Style subclass (default: 'default').
class TerminalFormatter

Short names: terminal, console
Filenames: None

Format tokens with ANSI color sequences, for output in a text console. Color sequences are terminated at newlines, so that paging the output works correctly.

The get_style_defs() method doesn’t do anything special since there is no support for common styles.

Options accepted:

bg
    Set to "light" or "dark" depending on the terminal’s background (default: "light").
colorscheme
    A dictionary mapping token types to (lightbg, darkbg) color names or None (default: None = use builtin colorscheme).
linenos
    Set to True to have line numbers on the terminal output as well (default: False = no line numbers).
class TerminalTrueColorFormatter

Short names: terminal16m, console16m, 16m
Filenames: None

Format tokens with ANSI color sequences, for output in a true-color terminal or console. Like in TerminalFormatter, color sequences are terminated at newlines, so that paging the output works correctly.

New in version 2.1.

Options accepted:

style
    The style to use, can be a string or a Style subclass (default: 'default').
class TestcaseFormatter

Short names: testcase
Filenames: None

Format tokens as appropriate for a new testcase.

New in version 2.0.
diff --git a/doc/_build/html/docs/index.html b/doc/_build/html/docs/index.html
new file mode 100644

Pygments documentation

Starting with Pygments

Builtin components

Reference

Hacking for Pygments

Hints and tricks

About Pygments

If you find bugs or have suggestions for the documentation, please submit them on GitHub <https://github.com/pygments/pygments>.
diff --git a/doc/_build/html/docs/integrate.html b/doc/_build/html/docs/integrate.html
new file mode 100644

Using Pygments in various scenarios

+
+

Markdown¶

+

Since Pygments 0.9, the distribution ships Markdown preprocessor sample code +that uses Pygments to render source code in +external/markdown-processor.py. You can copy and adapt it to your +liking.

+
+
+

TextMate¶

+

Antonio Cangiano has created a Pygments bundle for TextMate that allows to +colorize code via a simple menu option. It can be found here.

+
+
+

Bash completion¶

+

The source distribution contains a file external/pygments.bashcomp that +sets up completion for the pygmentize command in bash.

+
+
+

Wrappers for other languages¶

+

These libraries provide Pygments highlighting for users of languages other +than Python:

+ +
+
+ + +
+
+
+
+
+ +
+ + + \ No newline at end of file diff --git a/doc/_build/html/docs/java.html b/doc/_build/html/docs/java.html new file mode 100644 index 0000000..4dbb483 --- /dev/null +++ b/doc/_build/html/docs/java.html @@ -0,0 +1,184 @@ + + + + + + + Use Pygments in Java — Pygments + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

Use Pygments in Java¶

+

Thanks to Jython it is possible to use Pygments in +Java.

+

This page is a simple tutorial to get an idea of how this works. You can +then look at the Jython documentation for more +advanced uses.

+

Since version 1.5, Pygments is deployed on Maven Central as a JAR, as is Jython, +which makes it a lot easier to create a Java project.

+

Here is an example of a Maven pom.xml file for a +project running Pygments:

+
<?xml version="1.0" encoding="UTF-8"?>
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                             http://maven.apache.org/maven-v4_0_0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>example</groupId>
+  <artifactId>example</artifactId>
+  <version>1.0-SNAPSHOT</version>
+  <dependencies>
+    <dependency>
+      <groupId>org.python</groupId>
+      <artifactId>jython-standalone</artifactId>
+      <version>2.5.3</version>
+    </dependency>
+    <dependency>
+      <groupId>org.pygments</groupId>
+      <artifactId>pygments</artifactId>
+      <version>1.5</version>
+      <scope>runtime</scope>
+    </dependency>
+  </dependencies>
+</project>
+
+
+

The following Java example:

+
PythonInterpreter interpreter = new PythonInterpreter();
+
+// Set a variable with the content you want to work with
+interpreter.set("code", code);
+
+// Simply use Pygments as you would in Python
+interpreter.exec("from pygments import highlight\n"
+    + "from pygments.lexers import PythonLexer\n"
+    + "from pygments.formatters import HtmlFormatter\n"
+    + "\nresult = highlight(code, PythonLexer(), HtmlFormatter())");
+
+// Get the result that has been set in a variable
+System.out.println(interpreter.get("result", String.class));
+
+
+

will print something like:

+
<div class="highlight">
+<pre><span class="k">print</span> <span class="s">&quot;Hello World&quot;</span></pre>
+</div>
+
+
+
+ + +
+
+
+
+
+ +
+ + + \ No newline at end of file diff --git a/doc/_build/html/docs/lexerdevelopment.html b/doc/_build/html/docs/lexerdevelopment.html new file mode 100644 index 0000000..75ede3b --- /dev/null +++ b/doc/_build/html/docs/lexerdevelopment.html @@ -0,0 +1,774 @@ + + + + + + + Write your own lexer — Pygments + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

Write your own lexer¶

+

If a lexer for your favorite language is missing in the Pygments package, you +can easily write your own and extend Pygments.

+

All you need can be found inside the pygments.lexer module. As you can +read in the API documentation, a lexer is a class that is +initialized with some keyword arguments (the lexer options) and that provides a +get_tokens_unprocessed() method which is given a string or unicode +object with the data to lex.

+

The get_tokens_unprocessed() method must return an iterator or iterable +containing tuples in the form (index, token, value). Normally you don’t +need to do this since there are base lexers that do most of the work and that +you can subclass.
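To make the tuple form concrete, here is a small sketch (not part of the original text) that prints the raw (index, token, value) stream of the builtin DiffLexer shown later on this page:

from pygments.lexers import DiffLexer

# get_tokens_unprocessed() yields (index, tokentype, value) tuples
for index, token, value in DiffLexer().get_tokens_unprocessed('+added\n-removed\n'):
    print(index, token, repr(value))
# 0 Token.Generic.Inserted '+added\n'
# 7 Token.Generic.Deleted '-removed\n'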

+
+

RegexLexer¶

+

The lexer base class used by almost all of Pygments’ lexers is the +RegexLexer. This class allows you to define lexing rules in terms of +regular expressions for different states.

+

States are groups of regular expressions that are matched against the input +string at the current position. If one of these expressions matches, a +corresponding action is performed (such as yielding a token with a specific +type, or changing state), the current position is set to where the last match +ended and the matching process continues with the first regex of the current +state.

+

Lexer states are kept on a stack: each time a new state is entered, the new +state is pushed onto the stack. The most basic lexers (like the DiffLexer) +just need one state.

+

Each state is defined as a list of tuples in the form (regex, action, +new_state) where the last item is optional. In the most basic form, action +is a token type (like Name.Builtin). That means: When regex matches, emit a +token with the match text and type tokentype and push new_state on the state +stack. If the new state is '#pop', the topmost state is popped from the +stack instead. To pop more than one state, use '#pop:2' and so on. +'#push' is a synonym for pushing the current state on the stack.

+

The following example shows the DiffLexer from the builtin lexers. Note that +it contains some additional attributes name, aliases and filenames which +aren’t required for a lexer. They are used by the builtin lexer lookup +functions.

+
from pygments.lexer import RegexLexer
+from pygments.token import *
+
+class DiffLexer(RegexLexer):
+    name = 'Diff'
+    aliases = ['diff']
+    filenames = ['*.diff']
+
+    tokens = {
+        'root': [
+            (r' .*\n', Text),
+            (r'\+.*\n', Generic.Inserted),
+            (r'-.*\n', Generic.Deleted),
+            (r'@.*\n', Generic.Subheading),
+            (r'Index.*\n', Generic.Heading),
+            (r'=.*\n', Generic.Heading),
+            (r'.*\n', Text),
+        ]
+    }
+
+
+

As you can see this lexer only uses one state. When the lexer starts scanning +the text, it first checks if the current character is a space. If this is true +it scans everything until newline and returns the data as a Text token (which +is the “no special highlighting” token).

+

If this rule doesn’t match, it checks if the current char is a plus sign. And +so on.

+

If no rule matches at the current position, the current char is emitted as an +Error token that indicates a lexing error, and the position is increased by +one.

+
+
+

Adding and testing a new lexer¶

+

The easiest way to use a new lexer is to use Pygments’ support for loading +the lexer from a file relative to your current directory.

+

First, change the name of your lexer class to CustomLexer:

+
from pygments.lexer import RegexLexer
+from pygments.token import *
+
+class CustomLexer(RegexLexer):
+    """All your lexer code goes here!"""
+
+
+

Then you can load the lexer from the command line with the additional +flag -x:

+
$ pygmentize -l your_lexer_file.py -x
+
+
+

To specify a class name other than CustomLexer, append it with a colon:

+
$ pygmentize -l your_lexer.py:SomeLexer -x
+
+
+

Or, using the Python API:

+
from pygments.lexers import load_lexer_from_file
+
+# For a lexer named CustomLexer
+your_lexer = load_lexer_from_file(filename, **options)
+
+# For a lexer named MyNewLexer
+your_named_lexer = load_lexer_from_file(filename, "MyNewLexer", **options)
+
+
+

When loading custom lexers and formatters, be extremely careful to use only +trusted files; Pygments will perform the equivalent of eval on them.

+

If you only want to use your lexer with the Pygments API, you can import and +instantiate the lexer yourself, then pass it to pygments.highlight().
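For instance, a minimal sketch of that approach; your_lexer_file is the hypothetical module name used in the examples above:

from pygments import highlight
from pygments.formatters import HtmlFormatter
from your_lexer_file import CustomLexer  # hypothetical module from above

code = open('input.txt').read()  # any text your lexer understands
print(highlight(code, CustomLexer(), HtmlFormatter()))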

+

To prepare your new lexer for inclusion in the Pygments distribution, so that it +will be found when passing filenames or lexer aliases from the command line, you +have to perform the following steps.

+

First, change to the directory containing the Pygments source code. You +will need to have either an unpacked source tarball, or (preferably) a copy +cloned from GitHub.

+
$ cd .../pygments-main
+
+
+

Select a matching module under pygments/lexers, or create a new module for +your lexer class.

+

Next, make sure the lexer is known from outside of the module. All modules in +the pygments.lexers package specify __all__. For example, +esoteric.py sets:

+
__all__ = ['BrainfuckLexer', 'BefungeLexer', ...]
+
+
+

Add the name of your lexer class to this list (or create the list if your lexer +is the only class in the module).

+

Finally the lexer can be made publicly known by rebuilding the lexer mapping:

+
$ make mapfiles
+
+
+

To test the new lexer, store an example file with the proper extension in +tests/examplefiles. For example, to test your DiffLexer, add a +tests/examplefiles/example.diff containing a sample diff output.

+

Now you can use pygmentize to render your example to HTML:

+
$ ./pygmentize -O full -f html -o /tmp/example.html tests/examplefiles/example.diff
+
+
+

Note that this explicitly calls the pygmentize in the current directory +by preceding it with ./. This ensures your modifications are used. +Otherwise a possibly already installed, unmodified version without your new +lexer would be called from the system search path ($PATH).

+

To view the result, open /tmp/example.html in your browser.

+

Once the example renders as expected, you should run the complete test suite:

+
$ make test
+
+
+

It also tests that your lexer fulfills the lexer API and certain invariants, +such as that the concatenation of all token text is the same as the input text.

+
+
+

Regex Flags¶

+

You can either define regex flags locally in the regex (r'(?x)foo bar') or +globally by adding a flags attribute to your lexer class. If no attribute is +defined, it defaults to re.MULTILINE. For more information about regular +expression flags see the page about regular expressions in the Python +documentation.

+
+
+

Scanning multiple tokens at once¶

+

So far, the action element in the rule tuple of regex, action and state has +been a single token type. Now we look at the first of several other possible +values.

+

Here is a more complex lexer that highlights INI files. INI files consist of +sections, comments and key = value pairs:

+
from pygments.lexer import RegexLexer, bygroups
+from pygments.token import *
+
+class IniLexer(RegexLexer):
+    name = 'INI'
+    aliases = ['ini', 'cfg']
+    filenames = ['*.ini', '*.cfg']
+
+    tokens = {
+        'root': [
+            (r'\s+', Text),
+            (r';.*?$', Comment),
+            (r'\[.*?\]$', Keyword),
+            (r'(.*?)(\s*)(=)(\s*)(.*?)$',
+             bygroups(Name.Attribute, Text, Operator, Text, String))
+        ]
+    }
+
+
+

The lexer first looks for whitespace, comments and section names. After that, it +matches lines that look like key = value pairs, separated by an '=' +sign and optional whitespace.

+

The bygroups helper yields each capturing group in the regex with a different +token type. First the Name.Attribute token, then a Text token for the +optional whitespace, after that an Operator token for the equals sign. Then a +Text token for the whitespace again. The rest of the line is returned as +String.

+

Note that for this to work, every part of the match must be inside a capturing +group (a (...)), and there must not be any nested capturing groups. If you +nevertheless need a group, use a non-capturing group defined using this syntax: +(?:some|words|here) (note the ?: after the beginning parenthesis).

+

If you find yourself needing a capturing group inside the regex which shouldn’t +be part of the output but is used in the regular expressions for backreferencing +(e.g. r'(<(foo|bar)>)(.*?)(</\2>)'), you can pass None to the bygroups +function and that group will be skipped in the output, as sketched below.
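A minimal sketch of that case, reusing the backreference regex from the paragraph above; the nested group needed for \2 is mapped to None and therefore skipped:

from pygments.lexer import RegexLexer, bygroups
from pygments.token import Name, Text

class TagLexer(RegexLexer):
    tokens = {
        'root': [
            # group 2, (foo|bar), exists only for the \2 backreference
            (r'(<(foo|bar)>)(.*?)(</\2>)',
             bygroups(Name.Tag, None, Text, Name.Tag)),
            (r'.', Text),
        ]
    }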

+
+
+

Changing states¶

+

Many lexers need multiple states to work as expected. For example, some +languages allow multiline comments to be nested. Since this is a recursive +pattern it’s impossible to lex just using regular expressions.

+

Here is a lexer that recognizes C++ style comments (multi-line with /* */ +and single-line with // until end of line):

+
from pygments.lexer import RegexLexer
+from pygments.token import *
+
+class CppCommentLexer(RegexLexer):
+    name = 'Example Lexer with states'
+
+    tokens = {
+        'root': [
+            (r'[^/]+', Text),
+            (r'/\*', Comment.Multiline, 'comment'),
+            (r'//.*?$', Comment.Singleline),
+            (r'/', Text)
+        ],
+        'comment': [
+            (r'[^*/]', Comment.Multiline),
+            (r'/\*', Comment.Multiline, '#push'),
+            (r'\*/', Comment.Multiline, '#pop'),
+            (r'[*/]', Comment.Multiline)
+        ]
+    }
+
+
+

This lexer starts lexing in the 'root' state. It tries to match as much as +possible until it finds a slash ('/'). If the next character after the slash +is an asterisk ('*') the RegexLexer sends those two characters to the +output stream marked as Comment.Multiline and continues lexing with the rules +defined in the 'comment' state.

+

If there wasn’t an asterisk after the slash, the RegexLexer checks if it’s a +Singleline comment (i.e. followed by a second slash). If this also wasn’t the +case it must be a single slash, which is not a comment starter (the separate +regex for a single slash must also be given, else the slash would be marked as +an error token).

+

Inside the 'comment' state, we do the same thing again. Scan until the +lexer finds a star or slash. If it’s the opening of a multiline comment, push +the 'comment' state on the stack and continue scanning, again in the +'comment' state. Else, check if it’s the end of the multiline comment. If +yes, pop one state from the stack.

+

Note: If you pop from an empty stack you’ll get an IndexError. (There is an +easy way to prevent this from happening: don’t '#pop' in the root state).

+

If the RegexLexer encounters a newline that is flagged as an error token, the +stack is emptied and the lexer continues scanning in the 'root' state. This +can help produce error-tolerant highlighting for erroneous input, e.g. when a +single-line string is not closed.

+
+
+

Advanced state tricks¶

+

There are a few more things you can do with states:

+
    +
  • You can push multiple states onto the stack if you give a tuple instead of a +simple string as the third item in a rule tuple. For example, if you want to +match a comment containing a directive, something like:

    +
    /* <processing directive>    rest of comment */
    +
    +
    +

    you can use this rule:

    +
    tokens = {
    +    'root': [
    +        (r'/\* <', Comment, ('comment', 'directive')),
    +        ...
    +    ],
    +    'directive': [
    +        (r'[^>]*', Comment.Directive),
    +        (r'>', Comment, '#pop'),
    +    ],
    +    'comment': [
    +        (r'[^*]+', Comment),
    +        (r'\*/', Comment, '#pop'),
    +        (r'\*', Comment),
    +    ]
    +}
    +
    +
    +

    When this encounters the above sample, first 'comment' and 'directive' +are pushed onto the stack, then the lexer continues in the directive state +until it finds the closing >, then it continues in the comment state until +the closing */. Then, both states are popped from the stack again and +lexing continues in the root state.

    +
    +

    New in version 0.9: The tuple can contain the special '#push' and '#pop' (but not +'#pop:n') directives.

    +
    +
  • +
  • You can include the rules of a state in the definition of another. This is +done by using include from pygments.lexer:

    +
    from pygments.lexer import RegexLexer, bygroups, include
    +from pygments.token import *
    +
    +class ExampleLexer(RegexLexer):
    +    tokens = {
    +        'comments': [
    +            (r'/\*.*?\*/', Comment),
    +            (r'//.*?\n', Comment),
    +        ],
    +        'root': [
    +            include('comments'),
    +            (r'(function )(\w+)( {)',
    +             bygroups(Keyword, Name, Keyword), 'function'),
    +            (r'.', Text),
    +        ],
    +        'function': [
    +            (r'[^}/]+', Text),
    +            include('comments'),
    +            (r'/', Text),
    +            (r'\}', Keyword, '#pop'),
    +        ]
    +    }
    +
    +
    +

This is a hypothetical lexer for a language that consists of functions and +comments. Because comments can occur at toplevel and in functions, we need +rules for comments in both states. As you can see, the include helper saves +repeating rules that occur more than once (in this example, the state +'comments' will never be entered by the lexer, as it’s only there to be +included in 'root' and 'function').

    +
  • +
  • Sometimes, you may want to “combine” a state from existing ones. This is +possible with the combined helper from pygments.lexer.

    +

    If you, instead of a new state, write combined('state1', 'state2') as the +third item of a rule tuple, a new anonymous state will be formed from state1 +and state2 and if the rule matches, the lexer will enter this state.

    +

    This is not used very often, but can be helpful in some cases, such as the +PythonLexer’s string literal processing.

    +
  • +
  • If you want your lexer to start lexing in a different state you can modify the +stack by overriding the get_tokens_unprocessed() method:

    +
    from pygments.lexer import RegexLexer
    +
    +class ExampleLexer(RegexLexer):
    +    tokens = {...}
    +
    +    def get_tokens_unprocessed(self, text, stack=('root', 'otherstate')):
    +        for item in RegexLexer.get_tokens_unprocessed(self, text, stack):
    +            yield item
    +
    +
    +

    Some lexers like the PhpLexer use this to make the leading <?php +preprocessor comments optional. Note that you can crash the lexer easily by +putting values into the stack that don’t exist in the token map. Also +removing 'root' from the stack can result in strange errors!

    +
  • +
  • In some lexers, a state should be popped if anything is encountered that isn’t +matched by a rule in the state. You could use an empty regex at the end of +the state list, but Pygments provides a more obvious way of spelling that: +default('#pop') is equivalent to ('', Text, '#pop'); see the sketch after +this list.

    +
    +

    New in version 2.0.

    +
    +
  • +
+
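A minimal sketch of default() in action, using a made-up annotation syntax; anything not matched inside 'annotation' silently pops back to 'root':

from pygments.lexer import RegexLexer, default
from pygments.token import Name, Text

class DefaultDemoLexer(RegexLexer):
    tokens = {
        'root': [
            (r'@', Name.Decorator, 'annotation'),
            (r'\w+', Name),
            (r'\s+', Text),
        ],
        'annotation': [
            (r'\w+', Name.Decorator),
            default('#pop'),  # equivalent to ('', Text, '#pop')
        ],
    }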
+
+

Subclassing lexers derived from RegexLexer¶

+
+

New in version 1.6.

+
+

Sometimes multiple languages are very similar, but should still be lexed by +different lexer classes.

+

When subclassing a lexer derived from RegexLexer, the tokens dictionaries +defined in the parent and child class are merged. For example:

+
from pygments.lexer import RegexLexer, inherit
+from pygments.token import *
+
+class BaseLexer(RegexLexer):
+    tokens = {
+        'root': [
+            ('[a-z]+', Name),
+            (r'/\*', Comment, 'comment'),
+            ('"', String, 'string'),
+            ('\s+', Text),
+        ],
+        'string': [
+            ('[^"]+', String),
+            ('"', String, '#pop'),
+        ],
+        'comment': [
+            ...
+        ],
+    }
+
+class DerivedLexer(BaseLexer):
+    tokens = {
+        'root': [
+            ('[0-9]+', Number),
+            inherit,
+        ],
+        'string': [
+            (r'[^"\\]+', String),
+            (r'\\.', String.Escape),
+            ('"', String, '#pop'),
+        ],
+    }
+
+
+

The BaseLexer defines two states, lexing names and strings. The +DerivedLexer defines its own tokens dictionary, which extends the definitions +of the base lexer:

+
    +
  • The “root” state has an additional rule and then the special object inherit, +which tells Pygments to insert the token definitions of the parent class at +that point.

  • +
  • The “string” state is replaced entirely, since there is no inherit rule.

  • +
  • The “comment” state is inherited entirely.

  • +
+
+
+

Using multiple lexers¶

+

Using multiple lexers for the same input can be tricky. One of the easiest +combination techniques is shown here: You can replace the action entry in a rule +tuple with a lexer class. The matched text will then be lexed with that lexer, +and the resulting tokens will be yielded.

+

For example, look at this stripped-down HTML lexer:

+
import re  # needed for the re.IGNORECASE | re.DOTALL flags below
+
+from pygments.lexer import RegexLexer, bygroups, using
+from pygments.token import *
+from pygments.lexers.javascript import JavascriptLexer
+
+class HtmlLexer(RegexLexer):
+    name = 'HTML'
+    aliases = ['html']
+    filenames = ['*.html', '*.htm']
+
+    flags = re.IGNORECASE | re.DOTALL
+    tokens = {
+        'root': [
+            ('[^<&]+', Text),
+            ('&.*?;', Name.Entity),
+            (r'<\s*script\s*', Name.Tag, ('script-content', 'tag')),
+            (r'<\s*[a-zA-Z0-9:]+', Name.Tag, 'tag'),
+            (r'<\s*/\s*[a-zA-Z0-9:]+\s*>', Name.Tag),
+        ],
+        'script-content': [
+            (r'(.+?)(<\s*/\s*script\s*>)',
+             bygroups(using(JavascriptLexer), Name.Tag),
+             '#pop'),
+        ]
+    }
+
+
+

Here the content of a <script> tag is passed to a newly created instance of +a JavascriptLexer and not processed by the HtmlLexer. This is done using +the using helper that takes the other lexer class as its parameter.

+

Note the combination of bygroups and using. This makes sure that the +content up to the </script> end tag is processed by the JavascriptLexer, +while the end tag is yielded as a normal token with the Name.Tag type.

+

Also note the (r'<\s*script\s*', Name.Tag, ('script-content', 'tag')) rule. +Here, two states are pushed onto the state stack, 'script-content' and +'tag'. That means that first 'tag' is processed, which will lex +attributes and the closing >, then the 'tag' state is popped and the +next state on top of the stack will be 'script-content'.

+

Since you cannot refer to the class currently being defined, use this +(imported from pygments.lexer) to refer to the current lexer class, i.e. +using(this). This construct may seem unnecessary, but this is often the +most obvious way of lexing arbitrary syntax between fixed delimiters without +introducing deeply nested states.

+

The using() helper has a special keyword argument, state, which works as +follows: if given, the lexer to use initially is not in the "root" state, +but in the state given by this argument. This does not work with advanced +RegexLexer subclasses such as ExtendedRegexLexer (see below).

+

Any other keyword arguments passed to using() are added to the keyword +arguments used to create the lexer.
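A hedged sketch of the state keyword; HelperLexer and its 'value' state are invented for the illustration:

from pygments.lexer import RegexLexer, bygroups, using
from pygments.token import Name, Number, Punctuation, Text

class HelperLexer(RegexLexer):
    tokens = {
        'root':  [(r'\w+', Name)],
        'value': [(r'\d+', Number), (r'\s+', Text), (r'.', Text)],
    }

class OuterLexer(RegexLexer):
    tokens = {
        'root': [
            # bracket contents are lexed by HelperLexer, starting in its
            # 'value' state instead of 'root'
            (r'(\[)([^\]]*)(\])',
             bygroups(Punctuation, using(HelperLexer, state='value'), Punctuation)),
            (r'.', Text),
        ]
    }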

+
+
+

Delegating Lexer¶

+

Another approach for nested lexers is the DelegatingLexer which is for example +used for the template engine lexers. It takes two lexers as arguments on +initialisation: a root_lexer and a language_lexer.

+

The input is processed as follows: First, the whole text is lexed with the +language_lexer. All tokens yielded with the special type of Other are +then concatenated and given to the root_lexer. The language tokens of the +language_lexer are then inserted into the root_lexer’s token stream at the +appropriate positions.

+
from pygments.lexer import DelegatingLexer
+from pygments.lexers.web import HtmlLexer, PhpLexer
+
+class HtmlPhpLexer(DelegatingLexer):
+    def __init__(self, **options):
+        super(HtmlPhpLexer, self).__init__(HtmlLexer, PhpLexer, **options)
+
+
+

This procedure ensures that e.g. HTML with template tags in it is highlighted +correctly even if the template tags are put into HTML tags or attributes.

+

If you want to change the needle token Other to something else, you can give +the lexer another token type as the third parameter:

+
DelegatingLexer.__init__(MyLexer, OtherLexer, Text, **options)
+
+
+
+
+

Callbacks¶

+

Sometimes the grammar of a language is so complex that a lexer would be unable +to process it just by using regular expressions and stacks.

+

For this, the RegexLexer allows callbacks to be given in rule tuples, instead +of token types (bygroups and using are nothing else but preimplemented +callbacks). The callback must be a function taking two arguments:

+
    +
  • the lexer itself

  • +
  • the match object for the last matched rule

  • +
+

The callback must then return an iterable of (or simply yield) (index, +tokentype, value) tuples, which are then just passed through by +get_tokens_unprocessed(). The index here is the position of the token in +the input string, tokentype is the normal token type (like Name.Builtin), +and value the associated part of the input string.

+

You can see an example here:

+
from pygments.lexer import RegexLexer
+from pygments.token import Generic
+
+class HypotheticLexer(RegexLexer):
+
+    def headline_callback(lexer, match):
+        equal_signs = match.group(1)
+        text = match.group(2)
+        yield match.start(), Generic.Headline, equal_signs + text + equal_signs
+
+    tokens = {
+        'root': [
+            (r'(=+)(.*?)(\1)', headline_callback)
+        ]
+    }
+
+
+

If the regex for the headline_callback matches, the function is called with +the match object. Note that after the callback is done, processing continues +normally, that is, after the end of the previous match. The callback has no +possibility to influence the position.

+

There are not really any simple examples for lexer callbacks, but you can see +them in action e.g. in the SMLLexer class in ml.py.

+
+
+

The ExtendedRegexLexer class¶

+

The RegexLexer, even with callbacks, unfortunately isn’t powerful enough for +the funky syntax rules of languages such as Ruby.

+

But fear not; even then you don’t have to abandon the regular expression +approach: Pygments has a subclass of RegexLexer, the ExtendedRegexLexer. +All features known from RegexLexers are available here too, and the tokens are +specified in exactly the same way, except for one detail:

+

The get_tokens_unprocessed() method holds its internal state data not as local +variables, but in an instance of the pygments.lexer.LexerContext class, and +that instance is passed to callbacks as a third argument. This means that you +can modify the lexer state in callbacks.

+

The LexerContext class has the following members:

+
    +
  • text – the input text

  • +
  • pos – the current starting position that is used for matching regexes

  • +
  • stack – a list containing the state stack

  • +
  • end – the maximum position to which regexes are matched, this defaults to +the length of text

  • +
+

Additionally, the get_tokens_unprocessed() method can be given a +LexerContext instead of a string and will then process this context instead of +creating a new one for the string argument.
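For example, a hedged sketch of passing a context; RubyLexer is used here only because it is an ExtendedRegexLexer subclass:

from pygments.lexer import LexerContext
from pygments.lexers import RubyLexer  # an ExtendedRegexLexer subclass

text = 'puts "hi"\n'
ctx = LexerContext(text, 0)  # pos=0: start lexing at the beginning
for index, token, value in RubyLexer().get_tokens_unprocessed(context=ctx):
    print(index, token, repr(value))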

+

Note that because you can set the current position to anything in the callback, +it won’t automatically be set by the caller after the callback is finished. +For example, this is how the hypothetical lexer above would be written with the +ExtendedRegexLexer:

+
from pygments.lexer import ExtendedRegexLexer
+from pygments.token import Generic
+
+class ExHypotheticLexer(ExtendedRegexLexer):
+
+    def headline_callback(lexer, match, ctx):
+        equal_signs = match.group(1)
+        text = match.group(2)
+        yield match.start(), Generic.Headline, equal_signs + text + equal_signs
+        ctx.pos = match.end()
+
+    tokens = {
+        'root': [
+            (r'(=+)(.*?)(\1)', headline_callback)
+        ]
+    }
+
+
+

This might sound confusing (and it can really be). But it is needed, and for an +example look at the Ruby lexer in ruby.py.

+
+
+

Handling Lists of Keywords¶

+

For a relatively short list (hundreds) you can construct an optimized regular +expression directly using words() (for longer lists, see the next section). This +function handles a few things for you automatically, including escaping +metacharacters and Python’s first-match rather than longest-match in +alternations. Feel free to put the lists themselves in +pygments/lexers/_$lang_builtins.py (see examples there), and generate them by +code if possible.

+

An example of using words() is something like:

+
from pygments.lexer import RegexLexer, words
+from pygments.token import Name
+
+class MyLexer(RegexLexer):
+
+    tokens = {
+        'root': [
+            (words(('else', 'elseif'), suffix=r'\b'), Name.Builtin),
+            (r'\w+', Name),
+        ],
+    }
+
+
+

As you can see, you can add prefix and suffix parts to the constructed +regex.

+
+
+

Modifying Token Streams¶

+

Some languages ship a lot of builtin functions (for example PHP). The total +number of those functions differs from system to system because not everybody +has every extension installed. In the case of PHP there are over 3000 builtin +functions, far more than you want to put into a regular expression.

+

But because only Name tokens can be function names this is solvable by +overriding the get_tokens_unprocessed() method. The following lexer +subclasses the PythonLexer so that it highlights some additional names as +pseudo keywords:

+
from pygments.lexers.python import PythonLexer
+from pygments.token import Name, Keyword
+
+class MyPythonLexer(PythonLexer):
+    EXTRA_KEYWORDS = set(('foo', 'bar', 'foobar', 'barfoo', 'spam', 'eggs'))
+
+    def get_tokens_unprocessed(self, text):
+        for index, token, value in PythonLexer.get_tokens_unprocessed(self, text):
+            if token is Name and value in self.EXTRA_KEYWORDS:
+                yield index, Keyword.Pseudo, value
+            else:
+                yield index, token, value
+
+
+

The PhpLexer and LuaLexer use this method to resolve builtin functions.

+
+
+ + +
+
+
+
+
+ +
+ + + \ No newline at end of file diff --git a/doc/_build/html/docs/lexers.html b/doc/_build/html/docs/lexers.html new file mode 100644 index 0000000..9669146 --- /dev/null +++ b/doc/_build/html/docs/lexers.html @@ -0,0 +1,10359 @@ + + + + + + + Available lexers — Pygments + + + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

Available lexers¶

+

This page lists all available builtin lexers and the options they take.

+

Currently, all lexers support these options:

+
+
stripnl

Strip leading and trailing newlines from the input (default: True).

+
+
stripall

Strip all leading and trailing whitespace from the input (default: +False).

+
+
ensurenl

Make sure that the input ends with a newline (default: True). This +is required for some lexers that consume input linewise.

+
+

New in version 1.3.

+
+
+
tabsize

If given and greater than 0, expand tabs in the input (default: 0).

+
+
encoding

If given, must be an encoding name (such as "utf-8"). This encoding +will be used to convert the input string to Unicode (if it is not already +a Unicode string). The default is "guess".

+

If this option is set to "guess", a simple UTF-8 vs. Latin-1 +detection is used, if it is set to "chardet", the +chardet library is used to +guess the encoding of the input.

+
+

New in version 0.6.

+
+
+
+

The “Short Names” field lists the identifiers that can be used with the +get_lexer_by_name() function.
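For instance, a brief sketch (not part of the original list) of passing the common options through get_lexer_by_name():

from pygments.lexers import get_lexer_by_name

# common lexer options documented above
lexer = get_lexer_by_name('python', stripall=True, tabsize=4, encoding='utf-8')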

+

These lexers are builtin and can be imported from pygments.lexers:

+
+

Lexers for ActionScript and MXML¶

+
+
+class pygments.lexers.actionscript.ActionScript3Lexer¶
+
+
Short names
+

as3, actionscript3

+
+
Filenames
+

*.as

+
+
MIME types
+

application/x-actionscript3, text/x-actionscript3, text/actionscript3

+
+
+

For ActionScript 3 source code.

+
+

New in version 0.11.

+
+
+ +
+
+class pygments.lexers.actionscript.ActionScriptLexer¶
+
+
Short names
+

as, actionscript

+
+
Filenames
+

*.as

+
+
MIME types
+

application/x-actionscript, text/x-actionscript, text/actionscript

+
+
+

For ActionScript source code.

+
+

New in version 0.9.

+
+
+ +
+
+class pygments.lexers.actionscript.MxmlLexer¶
+
+
Short names
+

mxml

+
+
Filenames
+

*.mxml

+
+
MIME types
+

None

+
+
+

For MXML markup. +Nested AS3 in <script> tags is highlighted by the appropriate lexer.

+
+

New in version 1.1.

+
+
+ +
+
+

Lexers for computer algebra systems¶

+
+
+class pygments.lexers.algebra.BCLexer¶
+
+
Short names
+

bc

+
+
Filenames
+

*.bc

+
+
MIME types
+

None

+
+
+

A BC lexer.

+
+

New in version 2.1.

+
+
+ +
+
+class pygments.lexers.algebra.GAPLexer¶
+
+
Short names
+

gap

+
+
Filenames
+

*.g, *.gd, *.gi, *.gap

+
+
MIME types
+

None

+
+
+

For GAP source code.

+
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.algebra.MathematicaLexer¶
+
+
Short names
+

mathematica, mma, nb

+
+
Filenames
+

*.nb, *.cdf, *.nbp, *.ma

+
+
MIME types
+

application/mathematica, application/vnd.wolfram.mathematica, application/vnd.wolfram.mathematica.package, application/vnd.wolfram.cdf

+
+
+

Lexer for Mathematica source code.

+
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.algebra.MuPADLexer¶
+
+
Short names
+

mupad

+
+
Filenames
+

*.mu

+
+
MIME types
+

None

+
+
+

A MuPAD lexer. +Contributed by Christopher Creutzig <christopher@creutzig.de>.

+
+

New in version 0.8.

+
+
+ +
+
+

Lexers for AmbientTalk language¶

+
+
+class pygments.lexers.ambient.AmbientTalkLexer¶
+
+
Short names
+

at, ambienttalk, ambienttalk/2

+
+
Filenames
+

*.at

+
+
MIME types
+

text/x-ambienttalk

+
+
+

Lexer for AmbientTalk source code.

+
+

New in version 2.0.

+
+
+ +
+
+

Lexers for the AMPL language¶

+
+
+class pygments.lexers.ampl.AmplLexer¶
+
+
Short names
+

ampl

+
+
Filenames
+

*.run

+
+
MIME types
+

None

+
+
+

For AMPL source code.

+
+

New in version 2.2.

+
+
+ +
+
+

Lexers for APL¶

+
+
+class pygments.lexers.apl.APLLexer¶
+
+
Short names
+

apl

+
+
Filenames
+

*.apl

+
+
MIME types
+

None

+
+
+

A simple APL lexer.

+
+

New in version 2.0.

+
+
+ +
+ +
+

Lexers for assembly languages¶

+
+
+class pygments.lexers.asm.CObjdumpLexer¶
+
+
Short names
+

c-objdump

+
+
Filenames
+

*.c-objdump

+
+
MIME types
+

text/x-c-objdump

+
+
+

For the output of ‘objdump -Sr’ on compiled C files

+
+ +
+
+class pygments.lexers.asm.Ca65Lexer¶
+
+
Short names
+

ca65

+
+
Filenames
+

*.s

+
+
MIME types
+

None

+
+
+

For ca65 assembler sources.

+
+

New in version 1.6.

+
+
+ +
+
+class pygments.lexers.asm.CppObjdumpLexer¶
+
+
Short names
+

cpp-objdump, c++-objdumb, cxx-objdump

+
+
Filenames
+

*.cpp-objdump, *.c++-objdump, *.cxx-objdump

+
+
MIME types
+

text/x-cpp-objdump

+
+
+

For the output of ‘objdump -Sr’ on compiled C++ files

+
+ +
+
+class pygments.lexers.asm.DObjdumpLexer¶
+
+
Short names
+

d-objdump

+
+
Filenames
+

*.d-objdump

+
+
MIME types
+

text/x-d-objdump

+
+
+

For the output of ‘objdump -Sr’ on compiled D files

+
+ +
+
+class pygments.lexers.asm.Dasm16Lexer¶
+
+
Short names
+

dasm16

+
+
Filenames
+

*.dasm16, *.dasm

+
+
MIME types
+

text/x-dasm16

+
+
+

Simple lexer for DCPU-16 Assembly

+

Check http://0x10c.com/doc/dcpu-16.txt

+
+

New in version 2.4.

+
+
+ +
+
+class pygments.lexers.asm.GasLexer¶
+
+
Short names
+

gas, asm

+
+
Filenames
+

*.s, *.S

+
+
MIME types
+

text/x-gas

+
+
+

For Gas (AT&T) assembly code.

+
+ +
+
+class pygments.lexers.asm.HsailLexer¶
+
+
Short names
+

hsail, hsa

+
+
Filenames
+

*.hsail

+
+
MIME types
+

text/x-hsail

+
+
+

For HSAIL assembly code.

+
+

New in version 2.2.

+
+
+ +
+
+class pygments.lexers.asm.LlvmLexer¶
+
+
Short names
+

llvm

+
+
Filenames
+

*.ll

+
+
MIME types
+

text/x-llvm

+
+
+

For LLVM assembly code.

+
+ +
+
+class pygments.lexers.asm.NasmLexer¶
+
+
Short names
+

nasm

+
+
Filenames
+

*.asm, *.ASM

+
+
MIME types
+

text/x-nasm

+
+
+

For Nasm (Intel) assembly code.

+
+ +
+
+class pygments.lexers.asm.NasmObjdumpLexer¶
+
+
Short names
+

objdump-nasm

+
+
Filenames
+

*.objdump-intel

+
+
MIME types
+

text/x-nasm-objdump

+
+
+

For the output of ‘objdump -d -M intel’.

+
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.asm.ObjdumpLexer¶
+
+
Short names
+

objdump

+
+
Filenames
+

*.objdump

+
+
MIME types
+

text/x-objdump

+
+
+

For the output of ‘objdump -dr’

+
+ +
+
+class pygments.lexers.asm.TasmLexer¶
+
+
Short names
+

tasm

+
+
Filenames
+

*.asm, *.ASM, *.tasm

+
+
MIME types
+

text/x-tasm

+
+
+

For Tasm (Turbo Assembler) assembly code.

+
+ +
+
+

Lexers for automation scripting languages¶

+
+
+class pygments.lexers.automation.AutoItLexer¶
+
+
Short names
+

autoit

+
+
Filenames
+

*.au3

+
+
MIME types
+

text/x-autoit

+
+
+

For AutoIt files.

+

AutoIt is a freeware BASIC-like scripting language +designed for automating the Windows GUI and general scripting.

+
+

New in version 1.6.

+
+
+ +
+
+class pygments.lexers.automation.AutohotkeyLexer¶
+
+
Short names
+

ahk, autohotkey

+
+
Filenames
+

*.ahk, *.ahkl

+
+
MIME types
+

text/x-autohotkey

+
+
+

For autohotkey source code.

+
+

New in version 1.4.

+
+
+ +
+
+

Lexers for BASIC like languages (other than VB.net)¶

+
+
+class pygments.lexers.basic.BBCBasicLexer¶
+
+
Short names
+

bbcbasic

+
+
Filenames
+

*.bbc

+
+
MIME types
+

None

+
+
+

BBC Basic was supplied on the BBC Micro, and later Acorn RISC OS. +It is also used by BBC Basic For Windows.

+
+

New in version 2.4.

+
+
+ +
+
+class pygments.lexers.basic.BlitzBasicLexer¶
+
+
Short names
+

blitzbasic, b3d, bplus

+
+
Filenames
+

*.bb, *.decls

+
+
MIME types
+

text/x-bb

+
+
+

For BlitzBasic source code.

+
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.basic.BlitzMaxLexer¶
+
+
Short names
+

blitzmax, bmax

+
+
Filenames
+

*.bmx

+
+
MIME types
+

text/x-bmx

+
+
+

For BlitzMax source code.

+
+

New in version 1.4.

+
+
+ +
+
+class pygments.lexers.basic.CbmBasicV2Lexer¶
+
+
Short names
+

cbmbas

+
+
Filenames
+

*.bas

+
+
MIME types
+

None

+
+
+

For CBM BASIC V2 sources.

+
+

New in version 1.6.

+
+
+ +
+
+class pygments.lexers.basic.MonkeyLexer¶
+
+
Short names
+

monkey

+
+
Filenames
+

*.monkey

+
+
MIME types
+

text/x-monkey

+
+
+

For +Monkey +source code.

+
+

New in version 1.6.

+
+
+ +
+
+class pygments.lexers.basic.QBasicLexer¶
+
+
Short names
+

qbasic, basic

+
+
Filenames
+

*.BAS, *.bas

+
+
MIME types
+

text/basic

+
+
+

For +QBasic +source code.

+
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.basic.VBScriptLexer¶
+
+
Short names
+

vbscript

+
+
Filenames
+

*.vbs, *.VBS

+
+
MIME types
+

None

+
+
+

VBScript is a scripting language modeled on Visual Basic.

+
+

New in version 2.4.

+
+
+ +
+
+

Lexers for BibTeX bibliography data and styles¶

+
+
+class pygments.lexers.bibtex.BSTLexer¶
+
+
Short names
+

bst, bst-pybtex

+
+
Filenames
+

*.bst

+
+
MIME types
+

None

+
+
+

A lexer for BibTeX bibliography styles.

+
+

New in version 2.2.

+
+
+ +
+
+class pygments.lexers.bibtex.BibTeXLexer¶
+
+
Short names
+

bib, bibtex

+
+
Filenames
+

*.bib

+
+
MIME types
+

text/x-bibtex

+
+
+

A lexer for BibTeX bibliography data format.

+
+

New in version 2.2.

+
+
+ +
+
+

Lexers for the Boa language¶

+
+
+class pygments.lexers.boa.BoaLexer¶
+
+
Short names
+

boa

+
+
Filenames
+

*.boa

+
+
MIME types
+

None

+
+
+

Lexer for the Boa language.

+
+

New in version 2.4.

+
+
+ +
+
+

Lexers for “business-oriented” languages¶

+
+
+class pygments.lexers.business.ABAPLexer¶
+
+
Short names
+

abap

+
+
Filenames
+

*.abap, *.ABAP

+
+
MIME types
+

text/x-abap

+
+
+

Lexer for ABAP, SAP’s integrated language.

+
+

New in version 1.1.

+
+
+ +
+
+class pygments.lexers.business.CobolFreeformatLexer¶
+
+
Short names
+

cobolfree

+
+
Filenames
+

*.cbl, *.CBL

+
+
MIME types
+

None

+
+
+

Lexer for Free format OpenCOBOL code.

+
+

New in version 1.6.

+
+
+ +
+
+class pygments.lexers.business.CobolLexer¶
+
+
Short names
+

cobol

+
+
Filenames
+

*.cob, *.COB, *.cpy, *.CPY

+
+
MIME types
+

text/x-cobol

+
+
+

Lexer for OpenCOBOL code.

+
+

New in version 1.6.

+
+
+ +
+
+class pygments.lexers.business.GoodDataCLLexer¶
+
+
Short names
+

gooddata-cl

+
+
Filenames
+

*.gdc

+
+
MIME types
+

text/x-gooddata-cl

+
+
+

Lexer for GoodData-CL +script files.

+
+

New in version 1.4.

+
+
+ +
+
+class pygments.lexers.business.MaqlLexer¶
+
+
Short names
+

maql

+
+
Filenames
+

*.maql

+
+
MIME types
+

text/x-gooddata-maql, application/x-gooddata-maql

+
+
+

Lexer for GoodData MAQL +scripts.

+
+

New in version 1.4.

+
+
+ +
+
+class pygments.lexers.business.OpenEdgeLexer¶
+
+
Short names
+

openedge, abl, progress

+
+
Filenames
+

*.p, *.cls

+
+
MIME types
+

text/x-openedge, application/x-openedge

+
+
+

Lexer for OpenEdge ABL (formerly Progress) source code.

+
+

New in version 1.5.

+
+
+ +
+
+

Lexers for C/C++ languages¶

+
+
+class pygments.lexers.c_cpp.CLexer¶
+
+
Short names
+

c

+
+
Filenames
+

*.c, *.h, *.idc

+
+
MIME types
+

text/x-chdr, text/x-csrc

+
+
+

For C source code with preprocessor directives.

+
+ +
+
+class pygments.lexers.c_cpp.CppLexer¶
+
+
Short names
+

cpp, c++

+
+
Filenames
+

*.cpp, *.hpp, *.c++, *.h++, *.cc, *.hh, *.cxx, *.hxx, *.C, *.H, *.cp, *.CPP

+
+
MIME types
+

text/x-c++hdr, text/x-c++src

+
+
+

For C++ source code with preprocessor directives.

+
+ +
+
+

Lexers for other C-like languages¶

+
+
+class pygments.lexers.c_like.ArduinoLexer¶
+
+
Short names
+

arduino

+
+
Filenames
+

*.ino

+
+
MIME types
+

text/x-arduino

+
+
+

For Arduino(tm) source.

+

This is an extension of the CppLexer, as the Arduino® Language is a superset +of C++.

+
+

New in version 2.1.

+
+
+ +
+
+class pygments.lexers.c_like.CharmciLexer¶
+
+
Short names
+

charmci

+
+
Filenames
+

*.ci

+
+
MIME types
+

None

+
+
+

For Charm++ interface files (.ci).

+
+

New in version 2.4.

+
+
+ +
+
+class pygments.lexers.c_like.ClayLexer¶
+
+
Short names
+

clay

+
+
Filenames
+

*.clay

+
+
MIME types
+

text/x-clay

+
+
+

For Clay source.

+
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.c_like.CudaLexer¶
+
+
Short names
+

cuda, cu

+
+
Filenames
+

*.cu, *.cuh

+
+
MIME types
+

text/x-cuda

+
+
+

For NVIDIA CUDA™ +source.

+
+

New in version 1.6.

+
+
+ +
+
+class pygments.lexers.c_like.ECLexer¶
+
+
Short names
+

ec

+
+
Filenames
+

*.ec, *.eh

+
+
MIME types
+

text/x-echdr, text/x-ecsrc

+
+
+

For eC source code with preprocessor directives.

+
+

New in version 1.5.

+
+
+ +
+
+class pygments.lexers.c_like.MqlLexer¶
+
+
Short names
+

mql, mq4, mq5, mql4, mql5

+
+
Filenames
+

*.mq4, *.mq5, *.mqh

+
+
MIME types
+

text/x-mql

+
+
+

For MQL4 and +MQL5 source code.

+
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.c_like.NesCLexer¶
+
+
Short names
+

nesc

+
+
Filenames
+

*.nc

+
+
MIME types
+

text/x-nescsrc

+
+
+

For nesC source code with preprocessor +directives.

+
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.c_like.PikeLexer¶
+
+
Short names
+

pike

+
+
Filenames
+

*.pike, *.pmod

+
+
MIME types
+

text/x-pike

+
+
+

For Pike source code.

+
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.c_like.SwigLexer¶
+
+
Short names
+

swig

+
+
Filenames
+

*.swg, *.i

+
+
MIME types
+

text/swig

+
+
+

For SWIG source code.

+
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.c_like.ValaLexer¶
+
+
Short names
+

vala, vapi

+
+
Filenames
+

*.vala, *.vapi

+
+
MIME types
+

text/x-vala

+
+
+

For Vala source code with preprocessor directives.

+
+

New in version 1.1.

+
+
+ +
+
+

Lexers for the Cap’n Proto schema language¶

+
+
+class pygments.lexers.capnproto.CapnProtoLexer¶
+
+
Short names
+

capnp

+
+
Filenames
+

*.capnp

+
+
MIME types
+

None

+
+
+

For Cap’n Proto source.

+
+

New in version 2.2.

+
+
+ +
+
+

Lexer for the Chapel language¶

+
+
+class pygments.lexers.chapel.ChapelLexer¶
+
+
Short names
+

chapel, chpl

+
+
Filenames
+

*.chpl

+
+
MIME types
+

None

+
+
+

For Chapel source.

+
+

New in version 2.0.

+
+
+ +
+
+

Lexer for the Clean language¶

+
+
+class pygments.lexers.clean.CleanLexer¶
+
+
Short names
+

clean

+
+
Filenames
+

*.icl, *.dcl

+
+
MIME types
+

None

+
+
+

Lexer for the general purpose, state-of-the-art, pure and lazy functional +programming language Clean (http://clean.cs.ru.nl/Clean).

+
+ +
+
+

Lexers for configuration file formats¶

+
+
+class pygments.lexers.configs.ApacheConfLexer¶
+
+
Short names
+

apacheconf, aconf, apache

+
+
Filenames
+

.htaccess, apache.conf, apache2.conf

+
+
MIME types
+

text/x-apacheconf

+
+
+

Lexer for configuration files following the Apache config file +format.

+
+

New in version 0.6.

+
+
+ +
+
+class pygments.lexers.configs.AugeasLexer¶
+
+
Short names
+

augeas

+
+
Filenames
+

*.aug

+
+
MIME types
+

None

+
+
+

Lexer for Augeas.

+
+

New in version 2.4.

+
+
+ +
+
+class pygments.lexers.configs.Cfengine3Lexer¶
+
+
Short names
+

cfengine3, cf3

+
+
Filenames
+

*.cf

+
+
MIME types
+

None

+
+
+

Lexer for CFEngine3 policy files.

+
+

New in version 1.5.

+
+
+ +
+
+class pygments.lexers.configs.DockerLexer¶
+
+
Short names
+

docker, dockerfile

+
+
Filenames
+

Dockerfile, *.docker

+
+
MIME types
+

text/x-dockerfile-config

+
+
+

Lexer for Docker configuration files.

+
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.configs.IniLexer¶
+
+
Short names
+

ini, cfg, dosini

+
+
Filenames
+

*.ini, *.cfg, *.inf

+
+
MIME types
+

text/x-ini, text/inf

+
+
+

Lexer for configuration files in INI style.

+
+ +
+
+class pygments.lexers.configs.KconfigLexer¶
+
+
Short names
+

kconfig, menuconfig, linux-config, kernel-config

+
+
Filenames
+

Kconfig, *Config.in*, external.in*, standard-modules.in

+
+
MIME types
+

text/x-kconfig

+
+
+

For Linux-style Kconfig files.

+
+

New in version 1.6.

+
+
+ +
+
+class pygments.lexers.configs.LighttpdConfLexer¶
+
+
Short names
+

lighty, lighttpd

+
+
Filenames
+

None

+
+
MIME types
+

text/x-lighttpd-conf

+
+
+

Lexer for Lighttpd configuration files.

+
+

New in version 0.11.

+
+
+ +
+
+class pygments.lexers.configs.NginxConfLexer¶
+
+
Short names
+

nginx

+
+
Filenames
+

nginx.conf

+
+
MIME types
+

text/x-nginx-conf

+
+
+

Lexer for Nginx configuration files.

+
+

New in version 0.11.

+
+
+ +
+
+class pygments.lexers.configs.PacmanConfLexer¶
+
+
Short names
+

pacmanconf

+
+
Filenames
+

pacman.conf

+
+
MIME types
+

None

+
+
+

Lexer for pacman.conf.

+

Actually, IniLexer works almost fine for this format, +but it yields error tokens, because pacman.conf allows +directives without an assignment, like:

+
+

UseSyslog +Color +TotalDownload +CheckSpace +VerbosePkgLists

+
+

These are flags to switch on.

+
+

New in version 2.1.

+
+
+ +
+
+class pygments.lexers.configs.PkgConfigLexer¶
+
+
Short names
+

pkgconfig

+
+
Filenames
+

*.pc

+
+
MIME types
+

None

+
+
+

Lexer for pkg-config +(see also manual page).

+
+

New in version 2.1.

+
+
+ +
+
+class pygments.lexers.configs.PropertiesLexer¶
+
+
Short names
+

properties, jproperties

+
+
Filenames
+

*.properties

+
+
MIME types
+

text/x-java-properties

+
+
+

Lexer for configuration files in Java’s properties format.

+

Note: trailing whitespace counts as part of the value as per spec

+
+

New in version 1.4.

+
+
+ +
+
+class pygments.lexers.configs.RegeditLexer¶
+
+
Short names
+

registry

+
+
Filenames
+

*.reg

+
+
MIME types
+

text/x-windows-registry

+
+
+

Lexer for Windows Registry files produced +by regedit.

+
+

New in version 1.6.

+
+
+ +
+
+class pygments.lexers.configs.SquidConfLexer¶
+
+
Short names
+

squidconf, squid.conf, squid

+
+
Filenames
+

squid.conf

+
+
MIME types
+

text/x-squidconf

+
+
+

Lexer for squid configuration files.

+
+

New in version 0.9.

+
+
+ +
+
+class pygments.lexers.configs.TOMLLexer¶
+
+
Short names
+

toml

+
+
Filenames
+

*.toml

+
+
MIME types
+

None

+
+
+

Lexer for TOML, a simple language +for config files.

+
+

New in version 2.4.

+
+
+ +
+
+class pygments.lexers.configs.TermcapLexer¶
+
+
Short names
+

termcap

+
+
Filenames
+

termcap, termcap.src

+
+
MIME types
+

None

+
+
+

Lexer for termcap database source.

+

This is very simple and minimal.

+
+

New in version 2.1.

+
+
+ +
+
+class pygments.lexers.configs.TerminfoLexer¶
+
+
Short names
+

terminfo

+
+
Filenames
+

terminfo, terminfo.src

+
+
MIME types
+

None

+
+
+

Lexer for terminfo database source.

+

This is very simple and minimal.

+
+

New in version 2.1.

+
+
+ +
+
+class pygments.lexers.configs.TerraformLexer¶
+
+
Short names
+

terraform, tf

+
+
Filenames
+

*.tf

+
+
MIME types
+

application/x-tf, application/x-terraform

+
+
+

Lexer for Terraform .tf files.

+
+

New in version 2.1.

+
+
+ +
+
+

Lexers for misc console output¶

+
+
+class pygments.lexers.console.PyPyLogLexer¶
+
+
Short names
+

pypylog, pypy

+
+
Filenames
+

*.pypylog

+
+
MIME types
+

application/x-pypylog

+
+
+

Lexer for PyPy log files.

+
+

New in version 1.5.

+
+
+ +
+
+class pygments.lexers.console.VCTreeStatusLexer¶
+
+
Short names
+

vctreestatus

+
+
Filenames
+

None

+
+
MIME types
+

None

+
+
+

For colorizing output of version control status commands, like “hg +status” or “svn status”.

+
+

New in version 2.0.

+
+
+ +
+
+

Lexer for Crystal¶

+
+
+class pygments.lexers.crystal.CrystalLexer¶
+
+
Short names
+

cr, crystal

+
+
Filenames
+

*.cr

+
+
MIME types
+

text/x-crystal

+
+
+

For Crystal source code.

+
+

New in version 2.2.

+
+
+ +
+
+

Lexers for Csound languages¶

+
+
+class pygments.lexers.csound.CsoundDocumentLexer¶
+
+
Short names
+

csound-document, csound-csd

+
+
Filenames
+

*.csd

+
+
MIME types
+

None

+
+
+

For Csound documents.

+
+

New in version 2.1.

+
+
+ +
+
+class pygments.lexers.csound.CsoundOrchestraLexer¶
+
+
Short names
+

csound, csound-orc

+
+
Filenames
+

*.orc, *.udo

+
+
MIME types
+

None

+
+
+

For Csound orchestras.

+
+

New in version 2.1.

+
+
+ +
+
+class pygments.lexers.csound.CsoundScoreLexer¶
+
+
Short names
+

csound-score, csound-sco

+
+
Filenames
+

*.sco

+
+
MIME types
+

None

+
+
+

For Csound scores.

+
+

New in version 2.1.

+
+
+ +
+ +
+

Lexers for D languages¶

+
+
+class pygments.lexers.d.CrocLexer¶
+
+
Short names
+

croc

+
+
Filenames
+

*.croc

+
+
MIME types
+

text/x-crocsrc

+
+
+

For Croc source.

+
+ +
+
+class pygments.lexers.d.DLexer¶
+
+
Short names
+

d

+
+
Filenames
+

*.d, *.di

+
+
MIME types
+

text/x-dsrc

+
+
+

For D source.

+
+

New in version 1.2.

+
+
+ +
+
+class pygments.lexers.d.MiniDLexer¶
+
+
Short names
+

minid

+
+
Filenames
+

None

+
+
MIME types
+

text/x-minidsrc

+
+
+

For MiniD source. MiniD is now known as Croc.

+
+ +
+ +
+

Lexers for data file format¶

+
+
+class pygments.lexers.data.JsonBareObjectLexer¶
+
+
Short names
+

json-object

+
+
Filenames
+

None

+
+
MIME types
+

application/json-object

+
+
+

For JSON data structures (with missing object curly braces).

+
+

New in version 2.2.

+
+
+ +
+
+class pygments.lexers.data.JsonLdLexer¶
+
+
Short names
+

jsonld, json-ld

+
+
Filenames
+

*.jsonld

+
+
MIME types
+

application/ld+json

+
+
+

For JSON-LD linked data.

+
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.data.JsonLexer¶
+
+
Short names
+

json

+
+
Filenames
+

*.json

+
+
MIME types
+

application/json

+
+
+

For JSON data structures.

+
+

New in version 1.5.

+
+
+ +
+
+class pygments.lexers.data.YamlLexer¶
+
+
Short names
+

yaml

+
+
Filenames
+

*.yaml, *.yml

+
+
MIME types
+

text/x-yaml

+
+
+

Lexer for YAML, a human-friendly data serialization +language.

+
+

New in version 0.11.

+
+
+ +
+
+

Lexers for diff/patch formats¶

+
+
+class pygments.lexers.diff.DarcsPatchLexer¶
+
+
Short names
+

dpatch

+
+
Filenames
+

*.dpatch, *.darcspatch

+
+
MIME types
+

None

+
+
+

DarcsPatchLexer is a lexer for the various versions of the darcs patch +format. Examples of this format are produced by commands such as +darcs annotate --patch and darcs send.

+
+

New in version 0.10.

+
+
+ +
+
+class pygments.lexers.diff.DiffLexer¶
+
+
Short names
+

diff, udiff

+
+
Filenames
+

*.diff, *.patch

+
+
MIME types
+

text/x-diff, text/x-patch

+
+
+

Lexer for unified or context-style diffs or patches.

+
+ +
+
+class pygments.lexers.diff.WDiffLexer¶
+
+
Short names
+

wdiff

+
+
Filenames
+

*.wdiff

+
+
MIME types
+

None

+
+
+

A wdiff lexer.

+

Note that:

+
    +
  • it applies only to normal wdiff output (without options like -l).

  • +
  • if the target files of wdiff contain “[-”, “-]”, “{+”, or “+}”, +especially if they are unbalanced, this lexer may produce confusing output.

  • +
+
+

New in version 2.2.

+
+
+ +
+
+

Lexers for .net languages¶

+
+
+class pygments.lexers.dotnet.BooLexer¶
+
+
Short names
+

boo

+
+
Filenames
+

*.boo

+
+
MIME types
+

text/x-boo

+
+
+

For Boo source code.

+
+ +
+
+class pygments.lexers.dotnet.CSharpAspxLexer¶
+
+
Short names
+

aspx-cs

+
+
Filenames
+

*.aspx, *.asax, *.ascx, *.ashx, *.asmx, *.axd

+
+
MIME types
+

None

+
+
+

Lexer for highlighting C# within ASP.NET pages.

+
+ +
+
+class pygments.lexers.dotnet.CSharpLexer¶
+
+
Short names
+

csharp, c#

+
+
Filenames
+

*.cs

+
+
MIME types
+

text/x-csharp

+
+
+

For C# +source code.

+

Additional options accepted:

+
+
unicodelevel

Determines which Unicode characters this lexer allows for identifiers. +The possible values are:

+
    +
  • none – only the ASCII letters and numbers are allowed. This +is the fastest selection.

  • +
  • basic – all Unicode characters from the specification except +category Lo are allowed.

  • +
  • full – all Unicode characters as specified in the C# specs +are allowed. Note that this means a considerable slowdown since the +Lo category has more than 40,000 characters in it!

  • +
+

The default value is basic.

+
+

New in version 0.8.

+
+
+
+
+ +
+
+class pygments.lexers.dotnet.FSharpLexer¶
+
+
Short names
+

fsharp, f#

+
+
Filenames
+

*.fs, *.fsi

+
+
MIME types
+

text/x-fsharp

+
+
+

For the F# language (version 3.0).

+
+

New in version 1.5.

+
+
+ +
+
+class pygments.lexers.dotnet.NemerleLexer¶
+
+
Short names
+

nemerle

+
+
Filenames
+

*.n

+
+
MIME types
+

text/x-nemerle

+
+
+

For Nemerle source code.

+

Additional options accepted:

+
+
unicodelevel

Determines which Unicode characters this lexer allows for identifiers. +The possible values are:

+
    +
  • none – only the ASCII letters and numbers are allowed. This +is the fastest selection.

  • +
  • basic – all Unicode characters from the specification except +category Lo are allowed.

  • +
  • full – all Unicode characters as specified in the C# specs +are allowed. Note that this means a considerable slowdown since the +Lo category has more than 40,000 characters in it!

  • +
+

The default value is basic.

+
+
+
+

New in version 1.5.

+
+
+ +
+
+class pygments.lexers.dotnet.VbNetAspxLexer¶
+
+
Short names
+

aspx-vb

+
+
Filenames
+

*.aspx, *.asax, *.ascx, *.ashx, *.asmx, *.axd

+
+
MIME types
+

None

+
+
+

Lexer for highlighting Visual Basic.net within ASP.NET pages.

+
+ +
+
+class pygments.lexers.dotnet.VbNetLexer¶
+
+
Short names
+

vb.net, vbnet

+
+
Filenames
+

*.vb, *.bas

+
+
MIME types
+

text/x-vbnet, text/x-vba

+
+
+

For +Visual Basic.NET +source code.

+
+ +
+
+

Lexers for various domain-specific languages¶

+
+
+class pygments.lexers.dsls.AlloyLexer¶
+
+
Short names
+

alloy

+
+
Filenames
+

*.als

+
+
MIME types
+

text/x-alloy

+
+
+

For Alloy source code.

+
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.dsls.CrmshLexer¶
+
+
Short names
+

crmsh, pcmk

+
+
Filenames
+

*.crmsh, *.pcmk

+
+
MIME types
+

None

+
+
+

Lexer for crmsh configuration files +for Pacemaker clusters.

+
+

New in version 2.1.

+
+
+ +
+
+class pygments.lexers.dsls.FlatlineLexer¶
+
+
Short names
+

flatline

+
+
Filenames
+

None

+
+
MIME types
+

text/x-flatline

+
+
+

Lexer for Flatline expressions.

+
+

New in version 2.2.

+
+
+ +
+
+class pygments.lexers.dsls.MscgenLexer¶
+
+
Short names
+

mscgen, msc

+
+
Filenames
+

*.msc

+
+
MIME types
+

None

+
+
+

For Mscgen files.

+
+

New in version 1.6.

+
+
+ +
+
+class pygments.lexers.dsls.PanLexer¶
+
+
Short names
+

pan

+
+
Filenames
+

*.pan

+
+
MIME types
+

None

+
+
+

Lexer for pan source files.

+

Based on tcsh lexer.

+
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.dsls.ProtoBufLexer¶
+
+
Short names
+

protobuf, proto

+
+
Filenames
+

*.proto

+
+
MIME types
+

None

+
+
+

Lexer for Protocol Buffer +definition files.

+
+

New in version 1.4.

+
+
+ +
+
+class pygments.lexers.dsls.PuppetLexer¶
+
+
Short names
+

puppet

+
+
Filenames
+

*.pp

+
+
MIME types
+

None

+
+
+

For Puppet configuration DSL.

+
+

New in version 1.6.

+
+
+ +
+
+class pygments.lexers.dsls.RslLexer¶
+
+
Short names
+

rsl

+
+
Filenames
+

*.rsl

+
+
MIME types
+

text/rsl

+
+
+

RSL is the formal specification +language used in the RAISE (Rigorous Approach to Industrial Software Engineering) +method.

+
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.dsls.SnowballLexer¶
+
+
Short names
+

snowball

+
+
Filenames
+

*.sbl

+
+
MIME types
+

None

+
+
+

Lexer for Snowball source code.

+
+

New in version 2.2.

+
+
+ +
+
+class pygments.lexers.dsls.ThriftLexer¶
+
+
Short names
+

thrift

+
+
Filenames
+

*.thrift

+
+
MIME types
+

application/x-thrift

+
+
+

For Thrift interface definitions.

+
+

New in version 2.1.

+
+
+ +
+
+class pygments.lexers.dsls.VGLLexer¶
+
+
Short names
+

vgl

+
+
Filenames
+

*.rpf

+
+
MIME types
+

None

+
+
+

For SampleManager VGL source code.

+
+

New in version 1.6.

+
+
+ +
+
+class pygments.lexers.dsls.ZeekLexer¶
+
+
Short names
+

zeek, bro

+
+
Filenames
+

*.zeek, *.bro

+
+
MIME types
+

None

+
+
+

For Zeek scripts.

+
+

New in version 2.5.

+
+
+ +
+
+

Lexers for the Dylan language¶

+
+
+class pygments.lexers.dylan.DylanConsoleLexer¶
+
+
Short names
+

dylan-console, dylan-repl

+
+
Filenames
+

*.dylan-console

+
+
MIME types
+

text/x-dylan-console

+
+
+

For Dylan interactive console output like:

+
? let a = 1;
=> 1
? a
=> 1
+
+

This is based on a copy of the RubyConsoleLexer.

+
+

New in version 1.6.

+
+
+ +
+
+class pygments.lexers.dylan.DylanLexer¶
+
+
Short names
+

dylan

+
+
Filenames
+

*.dylan, *.dyl, *.intr

+
+
MIME types
+

text/x-dylan

+
+
+

For the Dylan language.

+
+

New in version 0.7.

+
+
+ +
+
+class pygments.lexers.dylan.DylanLidLexer¶
+
+
Short names
+

dylan-lid, lid

+
+
Filenames
+

*.lid, *.hdp

+
+
MIME types
+

text/x-dylan-lid

+
+
+

For Dylan LID (Library Interchange Definition) files.

+
+

New in version 1.6.

+
+
+ +
+
+

Lexers for the ECL language¶

+
+
+class pygments.lexers.ecl.ECLLexer¶
+
+
Short names
+

ecl

+
+
Filenames
+

*.ecl

+
+
MIME types
+

application/x-ecl

+
+
+

Lexer for the declarative big-data ECL language.

+
+

New in version 1.5.

+
+
+ +
+
+

Lexer for the Eiffel language¶

+
+
+class pygments.lexers.eiffel.EiffelLexer¶
+
+
Short names
+

eiffel

+
+
Filenames
+

*.e

+
+
MIME types
+

text/x-eiffel

+
+
+

For Eiffel source code.

+
+

New in version 2.0.

+
+
+ +
+
+

Lexer for the Elm programming language¶

+
+
+class pygments.lexers.elm.ElmLexer¶
+
+
Short names
+

elm

+
+
Filenames
+

*.elm

+
+
MIME types
+

text/x-elm

+
+
+

For Elm source code.

+
+

New in version 2.1.

+
+
+ +
+
+

Lexer for the raw E-mail¶

+
+
+class pygments.lexers.email.EmailLexer¶
+
+
Short names
+

email, eml

+
+
Filenames
+

*.eml

+
+
MIME types
+

message/rfc822

+
+
+

Lexer for raw E-mail.

+

Additional options accepted:

+
+
highlight-X-header

Highlight the fields of X- user-defined email headers (default: False).

+
+
+
+

New in version 2.5.

+
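A minimal usage sketch (hyphenated option names have to be passed through a dict):

>>> from pygments.lexers.email import EmailLexer
>>> lexer = EmailLexer(**{'highlight-X-header': True})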
+
+ +
+
+

Lexers for Erlang¶

+
+
+class pygments.lexers.erlang.ElixirConsoleLexer¶
+
+
Short names
+

iex

+
+
Filenames
+

None

+
+
MIME types
+

text/x-elixir-shellsession

+
+
+

For Elixir interactive console (iex) output like:

+
iex> [head | tail] = [1,2,3]
[1,2,3]
iex> head
1
iex> tail
[2,3]
iex> [head | tail]
[1,2,3]
iex> length [head | tail]
3
+
+
+

New in version 1.5.

+
+
+ +
+
+class pygments.lexers.erlang.ElixirLexer¶
+
+
Short names
+

elixir, ex, exs

+
+
Filenames
+

*.ex, *.exs

+
+
MIME types
+

text/x-elixir

+
+
+

For the Elixir language.

+
+

New in version 1.5.

+
+
+ +
+
+class pygments.lexers.erlang.ErlangLexer¶
+
+
Short names
+

erlang

+
+
Filenames
+

*.erl, *.hrl, *.es, *.escript

+
+
MIME types
+

text/x-erlang

+
+
+

For the Erlang functional programming language.

+

Blame Jeremy Thurgood (http://jerith.za.net/).

+
+

New in version 0.9.

+
+
+ +
+
+class pygments.lexers.erlang.ErlangShellLexer¶
+
+
Short names
+

erl

+
+
Filenames
+

*.erl-sh

+
+
MIME types
+

text/x-erl-shellsession

+
+
+

Shell sessions in erl (for Erlang code).

+
+

New in version 1.1.

+
+
+ +
+
+

Lexers for esoteric languages¶

+
+
+class pygments.lexers.esoteric.AheuiLexer¶
+
+
Short names
+

aheui

+
+
Filenames
+

*.aheui

+
+
MIME types
+

None

+
+
+

Aheui Lexer.

+

Aheui is an esoteric language based on the Korean alphabet.

+
+ +
+
+class pygments.lexers.esoteric.BefungeLexer¶
+
+
Short names
+

befunge

+
+
Filenames
+

*.befunge

+
+
MIME types
+

application/x-befunge

+
+
+

Lexer for the esoteric Befunge language.

+
+

New in version 0.7.

+
+
+ +
+
+class pygments.lexers.esoteric.BrainfuckLexer¶
+
+
Short names
+

brainfuck, bf

+
+
Filenames
+

*.bf, *.b

+
+
MIME types
+

application/x-brainfuck

+
+
+

Lexer for the esoteric BrainFuck language.

+
+ +
+
+class pygments.lexers.esoteric.CAmkESLexer¶
+
+
Short names
+

camkes, idl4

+
+
Filenames
+

*.camkes, *.idl4

+
+
MIME types
+

None

+
+
+

Basic lexer for the input language for the CAmkES component platform.

+
+

New in version 2.1.

+
+
+ +
+
+class pygments.lexers.esoteric.CapDLLexer¶
+
+
Short names
+

capdl

+
+
Filenames
+

*.cdl

+
+
MIME types
+

None

+
+
+

Basic lexer for CapDL.

+

The source of the primary tool that reads such specifications is available at https://github.com/seL4/capdl/tree/master/capDL-tool. Note that this lexer only supports a subset of the grammar. For example, identifiers can shadow type names, but these instances are currently incorrectly highlighted as types. Supporting this would need a stateful lexer that is considered unnecessarily complex for now.

+
+

New in version 2.2.

+
+
+ +
+
+class pygments.lexers.esoteric.RedcodeLexer¶
+
+
Short names
+

redcode

+
+
Filenames
+

*.cw

+
+
MIME types
+

None

+
+
+

A simple Redcode lexer based on ICWS’94. Contributed by Adam Blinkinsop <blinks@acm.org>.

+
+

New in version 0.8.

+
+
+ +
+
+

Pygments lexers for Ezhil language¶

+
+
+class pygments.lexers.ezhil.EzhilLexer¶
+
+
Short names
+

ezhil

+
+
Filenames
+

*.n

+
+
MIME types
+

text/x-ezhil

+
+
+

Lexer for Ezhil, a Tamil script-based programming language.

+
+

New in version 2.1.

+
+
+ +
+
+

Lexers for the Factor language¶

+
+
+class pygments.lexers.factor.FactorLexer¶
+
+
Short names
+

factor

+
+
Filenames
+

*.factor

+
+
MIME types
+

text/x-factor

+
+
+

Lexer for the Factor language.

+
+

New in version 1.4.

+
+
+ +
+
+

Lexer for the Fantom language¶

+
+
+class pygments.lexers.fantom.FantomLexer¶
+
+
Short names
+

fan

+
+
Filenames
+

*.fan

+
+
MIME types
+

application/x-fantom

+
+
+

For Fantom source code.

+
+

New in version 1.5.

+
+
+ +
+
+

Lexer for the Felix language¶

+
+
+class pygments.lexers.felix.FelixLexer¶
+
+
Short names
+

felix, flx

+
+
Filenames
+

*.flx, *.flxh

+
+
MIME types
+

text/x-felix

+
+
+

For Felix source code.

+
+

New in version 1.2.

+
+
+ +
+
+

Lexer for FloScript¶

+
+
+class pygments.lexers.floscript.FloScriptLexer¶
+
+
Short names
+

floscript, flo

+
+
Filenames
+

*.flo

+
+
MIME types
+

None

+
+
+

For FloScript configuration language source code.

+
+

New in version 2.4.

+
+
+ +
+
+

Lexer for the Forth language¶

+
+
+class pygments.lexers.forth.ForthLexer¶
+
+
Short names
+

forth

+
+
Filenames
+

*.frt, *.fs

+
+
MIME types
+

application/x-forth

+
+
+

Lexer for Forth files.

+
+

New in version 2.2.

+
+
+ +
+
+

Lexers for Fortran languages¶

+
+
+class pygments.lexers.fortran.FortranFixedLexer¶
+
+
Short names
+

fortranfixed

+
+
Filenames
+

*.f, *.F

+
+
MIME types
+

None

+
+
+

Lexer for fixed format Fortran.

+
+

New in version 2.1.

+
+
+ +
+
+class pygments.lexers.fortran.FortranLexer¶
+
+
Short names
+

fortran

+
+
Filenames
+

*.f03, *.f90, *.F03, *.F90

+
+
MIME types
+

text/x-fortran

+
+
+

Lexer for FORTRAN 90 code.

+
+

New in version 0.10.

+
+
+ +
+
+

Simple lexer for Microsoft Visual FoxPro source code¶

+
+
+class pygments.lexers.foxpro.FoxProLexer¶
+
+
Short names
+

foxpro, vfp, clipper, xbase

+
+
Filenames
+

*.PRG, *.prg

+
+
MIME types
+

None

+
+
+

Lexer for Microsoft Visual FoxPro language.

+

FoxPro syntax allows all keywords and function names to be shortened to 4 characters. Shortened forms are not recognized by this lexer.

+
+

New in version 1.6.

+
+
+ +
+
+

Lexer for FreeFem++ language¶

+
+
+class pygments.lexers.freefem.FreeFemLexer¶
+
+
Short names
+

freefem

+
+
Filenames
+

*.edp

+
+
MIME types
+

text/x-freefem

+
+
+

For FreeFem++ source.

+

This is an extension of the CppLexer, as the FreeFem Language is a superset of C++.

+
+

New in version 2.4.

+
+
+ +
+
+

Lexers for the Google Go language¶

+
+
+class pygments.lexers.go.GoLexer¶
+
+
Short names
+

go

+
+
Filenames
+

*.go

+
+
MIME types
+

text/x-gosrc

+
+
+

For Go source.

+
+

New in version 1.2.

+
+
+ +
+
+

Lexers for grammar notations like BNF¶

+
+
+class pygments.lexers.grammar_notation.AbnfLexer¶
+
+
Short names
+

abnf

+
+
Filenames
+

*.abnf

+
+
MIME types
+

text/x-abnf

+
+
+

Lexer for IETF 7405 ABNF (Updates 5234) grammars.

+
+

New in version 2.1.

+
+
+ +
+
+class pygments.lexers.grammar_notation.BnfLexer¶
+
+
Short names
+

bnf

+
+
Filenames
+

*.bnf

+
+
MIME types
+

text/x-bnf

+
+
+

This lexer is for grammar notations which are similar to the original BNF.

+

In order to maximize the number of notations this lexer can target, the following design decisions were made:

  • We don’t distinguish terminal symbols.

  • We assume that nonterminal symbols are always enclosed in angle brackets.

  • We assume that nonterminal symbols may include any printable characters except angle brackets and ASCII 0x20. This assumption is for RBNF.

  • We assume that the target notation doesn’t support comments.

  • We don’t distinguish any operators and punctuation except ::=.

These decisions may result in fairly minimal highlighting, but they keep the lexer applicable to as many BNF-like notations as possible.

+
+

New in version 2.1.

+
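As an illustrative sketch (the grammar fragment is made up for the example), the lexer can be exercised like any other:

>>> from pygments import highlight
>>> from pygments.lexers.grammar_notation import BnfLexer
>>> from pygments.formatters import TerminalFormatter
>>> grammar = '<expr> ::= <term> | <expr> "+" <term>'
>>> print(highlight(grammar, BnfLexer(), TerminalFormatter()))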
+
+ +
+
+class pygments.lexers.grammar_notation.JsgfLexer¶
+
+
Short names
+

jsgf

+
+
Filenames
+

*.jsgf

+
+
MIME types
+

application/jsgf, application/x-jsgf, text/jsgf

+
+
+

For JSpeech Grammar Format grammars.

+
+

New in version 2.2.

+
+
+ +
+
+

Lexers for graph query languages¶

+
+
+class pygments.lexers.graph.CypherLexer¶
+
+
Short names
+

cypher

+
+
Filenames
+

*.cyp, *.cypher

+
+
MIME types
+

None

+
+
+

For Cypher Query Language

+

For the Cypher version in Neo4j 3.3

+
+

New in version 2.0.

+
+
+ +
+ + + +
+

Lexers for hardware descriptor languages¶

+
+
+class pygments.lexers.hdl.SystemVerilogLexer¶
+
+
Short names
+

systemverilog, sv

+
+
Filenames
+

*.sv, *.svh

+
+
MIME types
+

text/x-systemverilog

+
+
+

Extends the Verilog lexer to recognise all SystemVerilog keywords from the IEEE 1800-2009 standard.

+
+

New in version 1.5.

+
+
+ +
+
+class pygments.lexers.hdl.VerilogLexer¶
+
+
Short names
+

verilog, v

+
+
Filenames
+

*.v

+
+
MIME types
+

text/x-verilog

+
+
+

For Verilog source code with preprocessor directives.

+
+

New in version 1.4.

+
+
+ +
+
+class pygments.lexers.hdl.VhdlLexer¶
+
+
Short names
+

vhdl

+
+
Filenames
+

*.vhdl, *.vhd

+
+
MIME types
+

text/x-vhdl

+
+
+

For VHDL source code.

+
+

New in version 1.5.

+
+
+ +
+
+

Lexers for hexadecimal dumps¶

+
+
+class pygments.lexers.hexdump.HexdumpLexer¶
+
+
Short names
+

hexdump

+
+
Filenames
+

None

+
+
MIME types
+

None

+
+
+

For typical hex dump output formats by the UNIX and GNU/Linux tools hexdump, hd, hexcat, od and xxd, and the DOS tool DEBUG. For example:

+
00000000  7f 45 4c 46 02 01 01 00  00 00 00 00 00 00 00 00  |.ELF............|
00000010  02 00 3e 00 01 00 00 00  c5 48 40 00 00 00 00 00  |..>......H@.....|
+
+

The specific supported formats are the outputs of the following (a usage sketch follows this list):

  • hexdump FILE

  • hexdump -C FILE – the canonical format used in the example.

  • hd FILE – same as hexdump -C FILE.

  • hexcat FILE

  • od -t x1z FILE

  • xxd FILE

  • DEBUG.EXE FILE.COM and entering d to the prompt.
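A minimal highlighting sketch using the example line from above:

>>> from pygments import highlight
>>> from pygments.lexers.hexdump import HexdumpLexer
>>> from pygments.formatters import TerminalFormatter
>>> dump = '00000000  7f 45 4c 46 02 01 01 00  00 00 00 00 00 00 00 00  |.ELF............|'
>>> print(highlight(dump, HexdumpLexer(), TerminalFormatter()))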
+

New in version 2.1.

+
+
+ +
+ +
+

Lexers for IDL¶

+
+
+class pygments.lexers.idl.IDLLexer¶
+
+
Short names
+

idl

+
+
Filenames
+

*.pro

+
+
MIME types
+

text/idl

+
+
+

Pygments Lexer for IDL (Interactive Data Language).

+
+

New in version 1.6.

+
+
+ +
+
+

Lexers for Igor Pro¶

+
+
+class pygments.lexers.igor.IgorLexer¶
+
+
Short names
+

igor, igorpro

+
+
Filenames
+

*.ipf

+
+
MIME types
+

text/ipf

+
+
+

Pygments Lexer for Igor Pro procedure files (.ipf). See http://www.wavemetrics.com/ and http://www.igorexchange.com/.

+
+

New in version 2.0.

+
+
+ +
+ +
+

Lexers for installer/packager DSLs and formats¶

+
+
+class pygments.lexers.installers.DebianControlLexer¶
+
+
Short names
+

control, debcontrol

+
+
Filenames
+

control

+
+
MIME types
+

None

+
+
+

Lexer for Debian control files and apt-cache show <pkg> outputs.

+
+

New in version 0.9.

+
+
+ +
+
+class pygments.lexers.installers.NSISLexer¶
+
+
Short names
+

nsis, nsi, nsh

+
+
Filenames
+

*.nsi, *.nsh

+
+
MIME types
+

text/x-nsis

+
+
+

For NSIS scripts.

+
+

New in version 1.6.

+
+
+ +
+
+class pygments.lexers.installers.RPMSpecLexer¶
+
+
Short names
+

spec

+
+
Filenames
+

*.spec

+
+
MIME types
+

text/x-rpm-spec

+
+
+

For RPM .spec files.

+
+

New in version 1.6.

+
+
+ +
+
+class pygments.lexers.installers.SourcesListLexer¶
+
+
Short names
+

sourceslist, sources.list, debsources

+
+
Filenames
+

sources.list

+
+
MIME types
+

None

+
+
+

Lexer that highlights debian sources.list files.

+
+

New in version 0.7.

+
+
+ +
+
+

Lexers for interactive fiction languages¶

+
+
+class pygments.lexers.int_fiction.Inform6Lexer¶
+
+
Short names
+

inform6, i6

+
+
Filenames
+

*.inf

+
+
MIME types
+

None

+
+
+

For Inform 6 source code.

+
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.int_fiction.Inform6TemplateLexer¶
+
+
Short names
+

i6t

+
+
Filenames
+

*.i6t

+
+
MIME types
+

None

+
+
+

For Inform 6 template code.

+
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.int_fiction.Inform7Lexer¶
+
+
Short names
+

inform7, i7

+
+
Filenames
+

*.ni, *.i7x

+
+
MIME types
+

None

+
+
+

For Inform 7 source code.

+
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.int_fiction.Tads3Lexer¶
+
+
Short names
+

tads3

+
+
Filenames
+

*.t

+
+
MIME types
+

None

+
+
+

For TADS 3 source code.

+
+ +
+
+

Lexers for the Io language¶

+
+
+class pygments.lexers.iolang.IoLexer¶
+
+
Short names
+

io

+
+
Filenames
+

*.io

+
+
MIME types
+

text/x-iosrc

+
+
+

For Io (a small, prototype-based programming language) source.

+
+

New in version 0.10.

+
+
+ +
+
+

Lexer for the J programming language¶

+
+
+class pygments.lexers.j.JLexer¶
+
+
Short names
+

j

+
+
Filenames
+

*.ijs

+
+
MIME types
+

text/x-j

+
+
+

For J source code.

+
+

New in version 2.1.

+
+
+ +
+ +
+

Lexers for the Julia language¶

+
+
+class pygments.lexers.julia.JuliaConsoleLexer¶
+
+
Short names
+

jlcon

+
+
Filenames
+

None

+
+
MIME types
+

None

+
+
+

For Julia console sessions. Modeled after MatlabSessionLexer.

+
+

New in version 1.6.

+
+
+ +
+
+class pygments.lexers.julia.JuliaLexer¶
+
+
Short names
+

julia, jl

+
+
Filenames
+

*.jl

+
+
MIME types
+

text/x-julia, application/x-julia

+
+
+

For Julia source code.

+
+

New in version 1.6.

+
+
+ +
+
+

Pygments lexers for JVM languages¶

+
+
+class pygments.lexers.jvm.AspectJLexer¶
+
+
Short names
+

aspectj

+
+
Filenames
+

*.aj

+
+
MIME types
+

text/x-aspectj

+
+
+

For AspectJ source code.

+
+

New in version 1.6.

+
+
+ +
+
+class pygments.lexers.jvm.CeylonLexer¶
+
+
Short names
+

ceylon

+
+
Filenames
+

*.ceylon

+
+
MIME types
+

text/x-ceylon

+
+
+

For Ceylon source code.

+
+

New in version 1.6.

+
+
+ +
+
+class pygments.lexers.jvm.ClojureLexer¶
+
+
Short names
+

clojure, clj

+
+
Filenames
+

*.clj

+
+
MIME types
+

text/x-clojure, application/x-clojure

+
+
+

Lexer for Clojure source code.

+
+

New in version 0.11.

+
+
+ +
+
+class pygments.lexers.jvm.ClojureScriptLexer¶
+
+
Short names
+

clojurescript, cljs

+
+
Filenames
+

*.cljs

+
+
MIME types
+

text/x-clojurescript, application/x-clojurescript

+
+
+

Lexer for ClojureScript source code.

+
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.jvm.GoloLexer¶
+
+
Short names
+

golo

+
+
Filenames
+

*.golo

+
+
MIME types
+

None

+
+
+

For Golo source code.

+
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.jvm.GosuLexer¶
+
+
Short names
+

gosu

+
+
Filenames
+

*.gs, *.gsx, *.gsp, *.vark

+
+
MIME types
+

text/x-gosu

+
+
+

For Gosu source code.

+
+

New in version 1.5.

+
+
+ +
+
+class pygments.lexers.jvm.GosuTemplateLexer¶
+
+
Short names
+

gst

+
+
Filenames
+

*.gst

+
+
MIME types
+

text/x-gosu-template

+
+
+

For Gosu templates.

+
+

New in version 1.5.

+
+
+ +
+
+class pygments.lexers.jvm.GroovyLexer¶
+
+
Short names
+

groovy

+
+
Filenames
+

*.groovy, *.gradle

+
+
MIME types
+

text/x-groovy

+
+
+

For Groovy source code.

+
+

New in version 1.5.

+
+
+ +
+
+class pygments.lexers.jvm.IokeLexer¶
+
+
Short names
+

ioke, ik

+
+
Filenames
+

*.ik

+
+
MIME types
+

text/x-iokesrc

+
+
+

For Ioke (a strongly typed, dynamic, prototype based programming language) source.

+
+

New in version 1.4.

+
+
+ +
+
+class pygments.lexers.jvm.JasminLexer¶
+
+
Short names
+

jasmin, jasminxt

+
+
Filenames
+

*.j

+
+
MIME types
+

None

+
+
+

For Jasmin assembly code.

+
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.jvm.JavaLexer¶
+
+
Short names
+

java

+
+
Filenames
+

*.java

+
+
MIME types
+

text/x-java

+
+
+

For Java source code.

+
+ +
+
+class pygments.lexers.jvm.KotlinLexer¶
+
+
Short names
+

kotlin

+
+
Filenames
+

*.kt

+
+
MIME types
+

text/x-kotlin

+
+
+

For Kotlin source code.

+
+

New in version 1.5.

+
+
+ +
+
+class pygments.lexers.jvm.PigLexer¶
+
+
Short names
+

pig

+
+
Filenames
+

*.pig

+
+
MIME types
+

text/x-pig

+
+
+

For Pig Latin source code.

+
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.jvm.SarlLexer¶
+
+
Short names
+

sarl

+
+
Filenames
+

*.sarl

+
+
MIME types
+

text/x-sarl

+
+
+
+ +

For SARL source code.

+

New in version 2.4.

+

+
+
+class pygments.lexers.jvm.ScalaLexer¶
+
+
Short names
+

scala

+
+
Filenames
+

*.scala

+
+
MIME types
+

text/x-scala

+
+
+

For Scala source code.

+
+ +
+
+class pygments.lexers.jvm.XtendLexer¶
+
+
Short names
+

xtend

+
+
Filenames
+

*.xtend

+
+
MIME types
+

text/x-xtend

+
+
+

For Xtend source code.

+
+

New in version 1.6.

+
+
+ +
+
+

Lexers for Lispy languages¶

+
+
+class pygments.lexers.lisp.CPSALexer¶
+
+
Short names
+

cpsa

+
+
Filenames
+

*.cpsa

+
+
MIME types
+

None

+
+
+

A CPSA lexer based on the CPSA language as of version 2.2.12.

+
+

New in version 2.1.

+
+
+ +
+
+class pygments.lexers.lisp.CommonLispLexer¶
+
+
Short names
+

common-lisp, cl, lisp

+
+
Filenames
+

*.cl, *.lisp

+
+
MIME types
+

text/x-common-lisp

+
+
+

A Common Lisp lexer.

+
+

New in version 0.9.

+
+
+ +
+
+class pygments.lexers.lisp.EmacsLispLexer¶
+
+
Short names
+

emacs, elisp, emacs-lisp

+
+
Filenames
+

*.el

+
+
MIME types
+

text/x-elisp, application/x-elisp

+
+
+

An ELisp lexer, parsing a stream and outputting the tokens needed to highlight elisp code.

+
+

New in version 2.1.

+
+
+ +
+
+class pygments.lexers.lisp.FennelLexer¶
+
+
Short names
+

fennel, fnl

+
+
Filenames
+

*.fnl

+
+
MIME types
+

None

+
+
+

A lexer for the Fennel programming language.

+

Fennel compiles to Lua, so all the Lua builtins are recognized as well as the special forms that are particular to the Fennel compiler.

+
+

New in version 2.3.

+
+
+ +
+
+class pygments.lexers.lisp.HyLexer¶
+
+
Short names
+

hylang

+
+
Filenames
+

*.hy

+
+
MIME types
+

text/x-hy, application/x-hy

+
+
+

Lexer for Hy source code.

+
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.lisp.NewLispLexer¶
+
+
Short names
+

newlisp

+
+
Filenames
+

*.lsp, *.nl, *.kif

+
+
MIME types
+

text/x-newlisp, application/x-newlisp

+
+
+

For newLISP source code (version 10.3.0).

+
+

New in version 1.5.

+
+
+ +
+
+class pygments.lexers.lisp.RacketLexer¶
+
+
Short names
+

racket, rkt

+
+
Filenames
+

*.rkt, *.rktd, *.rktl

+
+
MIME types
+

text/x-racket, application/x-racket

+
+
+

Lexer for Racket source code (formerly known as PLT Scheme).

+
+

New in version 1.6.

+
+
+ +
+
+class pygments.lexers.lisp.SchemeLexer¶
+
+
Short names
+

scheme, scm

+
+
Filenames
+

*.scm, *.ss

+
+
MIME types
+

text/x-scheme, application/x-scheme

+
+
+

A Scheme lexer, parsing a stream and outputting the tokens needed to highlight Scheme code. This lexer could most probably be easily subclassed to parse other Lisp dialects like Common Lisp, Emacs Lisp or AutoLisp.

+

This parser is checked with pastes from the LISP pastebin at http://paste.lisp.org/ to cover as much syntax as possible.

+

It supports the full Scheme syntax as defined in R5RS.

+
+

New in version 0.6.

+
+
+ +
+
+class pygments.lexers.lisp.ShenLexer¶
+
+
Short names
+

shen

+
+
Filenames
+

*.shen

+
+
MIME types
+

text/x-shen, application/x-shen

+
+
+

Lexer for Shen source code.

+
+

New in version 2.1.

+
+
+ +
+
+class pygments.lexers.lisp.XtlangLexer¶
+
+
Short names
+

extempore

+
+
Filenames
+

*.xtm

+
+
MIME types
+

None

+
+
+

An xtlang lexer for the Extempore programming environment.

+

This is a mixture of Scheme and xtlang, really. Keyword lists are taken from the Extempore Emacs mode (https://github.com/extemporelang/extempore-emacs-mode).

+
+

New in version 2.2.

+
+
+ +
+
+

Lexers for Makefiles and similar¶

+
+
+class pygments.lexers.make.BaseMakefileLexer¶
+
+
Short names
+

basemake

+
+
Filenames
+

None

+
+
MIME types
+

None

+
+
+

Lexer for simple Makefiles (no preprocessing).

+
+

New in version 0.10.

+
+
+ +
+
+class pygments.lexers.make.CMakeLexer¶
+
+
Short names
+

cmake

+
+
Filenames
+

*.cmake, CMakeLists.txt

+
+
MIME types
+

text/x-cmake

+
+
+

Lexer for CMake files.

+
+

New in version 1.2.

+
+
+ +
+
+class pygments.lexers.make.MakefileLexer¶
+
+
Short names
+

make, makefile, mf, bsdmake

+
+
Filenames
+

*.mak, *.mk, Makefile, makefile, Makefile.*, GNUmakefile

+
+
MIME types
+

text/x-makefile

+
+
+

Lexer for BSD and GNU make extensions (lenient enough to handle both in the same file even).

+

Rewritten in Pygments 0.10.

+
+ +
+
+

Lexers for non-HTML markup languages¶

+
+
+class pygments.lexers.markup.BBCodeLexer¶
+
+
Short names
+

bbcode

+
+
Filenames
+

None

+
+
MIME types
+

text/x-bbcode

+
+
+

A lexer that highlights BBCode(-like) syntax.

+
+

New in version 0.6.

+
+
+ +
+
+class pygments.lexers.markup.GroffLexer¶
+
+
Short names
+

groff, nroff, man

+
+
Filenames
+

*.[1234567], *.man

+
+
MIME types
+

application/x-troff, text/troff

+
+
+

Lexer for the (g)roff typesetting language, supporting groff extensions. Mainly useful for highlighting manpage sources.

+
+

New in version 0.6.

+
+
+ +
+
+class pygments.lexers.markup.MarkdownLexer¶
+
+
Short names
+

md

+
+
Filenames
+

*.md

+
+
MIME types
+

text/x-markdown

+
+
+

For Markdown markup.

+
+

New in version 2.2.

+
+
+ +
+
+class pygments.lexers.markup.MoinWikiLexer¶
+
+
Short names
+

trac-wiki, moin

+
+
Filenames
+

None

+
+
MIME types
+

text/x-trac-wiki

+
+
+

For MoinMoin (and Trac) Wiki markup.

+
+

New in version 0.7.

+
+
+ +
+
+class pygments.lexers.markup.MozPreprocCssLexer¶
+
+
Short names
+

css+mozpreproc

+
+
Filenames
+

*.css.in

+
+
MIME types
+

None

+
+
+

Subclass of the MozPreprocHashLexer that highlights unlexed data with the CssLexer.

+
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.markup.MozPreprocHashLexer¶
+
+
Short names
+

mozhashpreproc

+
+
Filenames
+

None

+
+
MIME types
+

None

+
+
+

Lexer for Mozilla Preprocessor files (with ‘#’ as the marker).

+

Other data is left untouched.

+
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.markup.MozPreprocJavascriptLexer¶
+
+
Short names
+

javascript+mozpreproc

+
+
Filenames
+

*.js.in

+
+
MIME types
+

None

+
+
+

Subclass of the MozPreprocHashLexer that highlights unlexed data with the JavascriptLexer.

+
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.markup.MozPreprocPercentLexer¶
+
+
Short names
+

mozpercentpreproc

+
+
Filenames
+

None

+
+
MIME types
+

None

+
+
+

Lexer for Mozilla Preprocessor files (with ‘%’ as the marker).

+

Other data is left untouched.

+
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.markup.MozPreprocXulLexer¶
+
+
Short names
+

xul+mozpreproc

+
+
Filenames
+

*.xul.in

+
+
MIME types
+

None

+
+
+

Subclass of the MozPreprocHashLexer that highlights unlexed data with the XmlLexer.

+
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.markup.RstLexer¶
+
+
Short names
+

rst, rest, restructuredtext

+
+
Filenames
+

*.rst, *.rest

+
+
MIME types
+

text/x-rst, text/prs.fallenstein.rst

+
+
+

For reStructuredText markup.

+
+

New in version 0.7.

+
+

Additional options accepted:

+
+
handlecodeblocks

Highlight the contents of .. sourcecode:: language, .. code:: language and .. code-block:: language directives with a lexer for the given language (default: True).

+
+

New in version 0.8.

+
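A small sketch of turning nested code-block highlighting off:

>>> from pygments.lexers.markup import RstLexer
>>> lexer = RstLexer(handlecodeblocks=False)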
+
+
+
+ +
+
+class pygments.lexers.markup.TexLexer¶
+
+
Short names
+

tex, latex

+
+
Filenames
+

*.tex, *.aux, *.toc

+
+
MIME types
+

text/x-tex, text/x-latex

+
+
+

Lexer for the TeX and LaTeX typesetting languages.

+
+ +
+ +
+

Lexer for Multipurpose Internet Mail Extensions (MIME) data¶

+
+
+class pygments.lexers.mime.MIMELexer¶
+
+
Short names
+

mime

+
+
Filenames
+

None

+
+
MIME types
+

multipart/mixed, multipart/related, multipart/alternative

+
+
+

Lexer for Multipurpose Internet Mail Extensions (MIME) data. This lexer is designed to process nested multipart data.

+

It assumes that the given data contains both header and body (separated by an empty line). If no valid header is found, the entire data is treated as body.

+

Additional options accepted:

+
+
MIME-max-level

Maximum recursion level for nested MIME structures. Any negative number is treated as unlimited (default: -1).

+
+
Content-Type

Treat the data as a specific content type. Useful when the header is missing; otherwise this lexer tries to parse the content type from the header (default: text/plain).

+
+
Multipart-Boundary

Set the default multipart boundary delimiter. This option is only used when the Content-Type is multipart and the header is missing; by default this lexer tries to parse the boundary from the header (default: None).

+
+
Content-Transfer-Encoding

Treat the data as a specific encoding; by default this lexer tries to parse the encoding from the header (default: None).

+
+
+
+

New in version 2.5.

+
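A hedged construction sketch (option names as listed above; the boundary value is made up):

>>> from pygments.lexers.mime import MIMELexer
>>> lexer = MIMELexer(**{'Content-Type': 'multipart/mixed',
...                      'Multipart-Boundary': 'boundary42'})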
+
+ +
+
+

Lexers for ML family languages¶

+
+
+class pygments.lexers.ml.OcamlLexer¶
+
+
Short names
+

ocaml

+
+
Filenames
+

*.ml, *.mli, *.mll, *.mly

+
+
MIME types
+

text/x-ocaml

+
+
+

For the OCaml language.

+
+

New in version 0.7.

+
+
+ +
+
+class pygments.lexers.ml.OpaLexer¶
+
+
Short names
+

opa

+
+
Filenames
+

*.opa

+
+
MIME types
+

text/x-opa

+
+
+

Lexer for the Opa language (http://opalang.org).

+
+

New in version 1.5.

+
+
+ +
+
+class pygments.lexers.ml.SMLLexer¶
+
+
Short names
+

sml

+
+
Filenames
+

*.sml, *.sig, *.fun

+
+
MIME types
+

text/x-standardml, application/x-standardml

+
+
+

For the Standard ML language.

+
+

New in version 1.5.

+
+
+ +
+
+

Lexers for modeling languages¶

+
+
+class pygments.lexers.modeling.BugsLexer¶
+
+
Short names
+

bugs, winbugs, openbugs

+
+
Filenames
+

*.bug

+
+
MIME types
+

None

+
+
+

Pygments Lexer for OpenBugs and WinBugs models.

+
+

New in version 1.6.

+
+
+ +
+
+class pygments.lexers.modeling.JagsLexer¶
+
+
Short names
+

jags

+
+
Filenames
+

*.jag, *.bug

+
+
MIME types
+

None

+
+
+

Pygments Lexer for JAGS.

+
+

New in version 1.6.

+
+
+ +
+
+class pygments.lexers.modeling.ModelicaLexer¶
+
+
Short names
+

modelica

+
+
Filenames
+

*.mo

+
+
MIME types
+

text/x-modelica

+
+
+

For Modelica source code.

+
+

New in version 1.1.

+
+
+ +
+
+class pygments.lexers.modeling.StanLexer¶
+
+
Short names
+

stan

+
+
Filenames
+

*.stan

+
+
MIME types
+

None

+
+
+

Pygments Lexer for Stan models.

+

The Stan modeling language is specified in the Stan Modeling Language User’s Guide and Reference Manual, v2.17.0 (pdf).

+
+

New in version 1.6.

+
+
+ +
+
+

Multi-Dialect Lexer for Modula-2¶

+
+
+class pygments.lexers.modula2.Modula2Lexer¶
+
+
Short names
+

modula2, m2

+
+
Filenames
+

*.def, *.mod

+
+
MIME types
+

text/x-modula2

+
+
+

For Modula-2 source code.

+

The Modula-2 lexer supports several dialects. By default, it operates in fallback mode, recognising the combined literals, punctuation symbols and operators of all supported dialects, and the combined reserved words and builtins of PIM Modula-2, ISO Modula-2 and Modula-2 R10, while not differentiating between library defined identifiers.

+

To select a specific dialect, a dialect option may be passed or a dialect tag may be embedded into a source file.

+

Dialect Options:

+
+
m2pim

Select PIM Modula-2 dialect.

+
+
m2iso

Select ISO Modula-2 dialect.

+
+
m2r10

Select Modula-2 R10 dialect.

+
+
objm2

Select Objective Modula-2 dialect.

+
+
+

The PIM and ISO dialect options may be qualified with a language extension.

+

Language Extensions:

+
+
+aglet

Select Aglet Modula-2 extensions, available with m2iso.

+
+
+gm2

Select GNU Modula-2 extensions, available with m2pim.

+
+
+p1

Select p1 Modula-2 extensions, available with m2iso.

+
+
+xds

Select XDS Modula-2 extensions, available with m2iso.

+
+
+

Passing a Dialect Option via Unix Commandline Interface

+

Dialect options may be passed to the lexer using the dialect key. Only one such option should be passed. If multiple dialect options are passed, the first valid option is used; any subsequent options are ignored.

+

Examples:

+
+
$ pygmentize -O full,dialect=m2iso -f html -o /path/to/output /path/to/input

Use ISO dialect to render input to HTML output

+
+
$ pygmentize -O full,dialect=m2iso+p1 -f rtf -o /path/to/output /path/to/input

Use ISO dialect with p1 extensions to render input to RTF output

+
+
+
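The same can be done from Python; a sketch, with the dialect key passed as an ordinary lexer option:

>>> from pygments.lexers.modula2 import Modula2Lexer
>>> lexer = Modula2Lexer(dialect='m2iso+p1')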

Embedding a Dialect Option within a source file

+

A dialect option may be embedded in a source file in the form of a dialect tag, a specially formatted comment that specifies a dialect option.

+

Dialect Tag EBNF:

+
dialectTag :
    OpeningCommentDelim Prefix dialectOption ClosingCommentDelim ;

dialectOption :
    'm2pim' | 'm2iso' | 'm2r10' | 'objm2' |
    'm2iso+aglet' | 'm2pim+gm2' | 'm2iso+p1' | 'm2iso+xds' ;

Prefix : '!' ;

OpeningCommentDelim : '(*' ;

ClosingCommentDelim : '*)' ;
+

No whitespace is permitted between the tokens of a dialect tag.

+

In the event that a source file contains multiple dialect tags, the first tag that contains a valid dialect option will be used and any subsequent dialect tags will be ignored. Ideally, a dialect tag should be placed at the beginning of a source file.

+

An embedded dialect tag overrides a dialect option set via command line.

+

Examples:

+
+
(*!m2r10*) DEFINITION MODULE Foobar; ...

Use Modula2 R10 dialect to render this source file.

+
+
(*!m2pim+gm2*) DEFINITION MODULE Bazbam; ...

Use PIM dialect with GNU extensions to render this source file.

+
+
+

Algol Publication Mode:

+

In Algol publication mode, source text is rendered for publication of algorithms in scientific papers and academic texts, following the format of the Revised Algol-60 Language Report. It is activated by passing one of two corresponding styles as an option:

+
+
algol

render reserved words lowercase underline boldface and builtins lowercase boldface italic

+
+
algol_nu

render reserved words lowercase boldface (no underlining) and builtins lowercase boldface italic

+
+
+

The lexer automatically performs the required lowercase conversion when this mode is activated.

+

Example:

+
+
$ pygmentize -O full,style=algol -f latex -o /path/to/output /path/to/input

Render input file in Algol publication mode to LaTeX output.

+
+
+

Rendering Mode of First Class ADT Identifiers:

+

The rendering of standard library first class ADT identifiers is controlled by option flag “treat_stdlib_adts_as_builtins”.

+

When this option is turned on, standard library ADT identifiers are rendered as builtins. When it is turned off, they are rendered as ordinary library identifiers.

+

treat_stdlib_adts_as_builtins (default: On)

+

The option is useful for dialects that support ADTs as first class objects and provide ADTs in the standard library that would otherwise be built-in.

+

At present, only Modula-2 R10 supports library ADTs as first class objects and therefore no ADT identifiers are defined for any other dialects.

+

Example:

+
+
$ pygmentize -O full,dialect=m2r10,treat_stdlib_adts_as_builtins=Off ...

Render standard library ADTs as ordinary library types.

+
+
+
+

New in version 1.3.

+
+
+

Changed in version 2.1: Added multi-dialect support.

+
+
+ +
+
+

Lexer for the Monte programming language¶

+
+
+class pygments.lexers.monte.MonteLexer¶
+
+
Short names
+

monte

+
+
Filenames
+

*.mt

+
+
MIME types
+

None

+
+
+

Lexer for the Monte programming language.

+
+

New in version 2.2.

+
+
+ +
+
+

Lexers for NCAR Command Language¶

+
+
+class pygments.lexers.ncl.NCLLexer¶
+
+
Short names
+

ncl

+
+
Filenames
+

*.ncl

+
+
MIME types
+

text/ncl

+
+
+

Lexer for NCL code.

+
+

New in version 2.2.

+
+
+ +
+
+

Lexer for the Nim language (formerly known as Nimrod)¶

+
+
+class pygments.lexers.nimrod.NimrodLexer¶
+
+
Short names
+

nim, nimrod

+
+
Filenames
+

*.nim, *.nimrod

+
+
MIME types
+

text/x-nim

+
+
+

For Nim source code.

+
+

New in version 1.5.

+
+
+ +
+
+

Lexer for the Nit language¶

+
+
+class pygments.lexers.nit.NitLexer¶
+
+
Short names
+

nit

+
+
Filenames
+

*.nit

+
+
MIME types
+

None

+
+
+

For nit source.

+
+

New in version 2.0.

+
+
+ +
+
+

Lexers for the NixOS Nix language¶

+
+
+class pygments.lexers.nix.NixLexer¶
+
+
Short names
+

nixos, nix

+
+
Filenames
+

*.nix

+
+
MIME types
+

text/x-nix

+
+
+

For the Nix language.

+
+

New in version 2.0.

+
+
+ +
+
+

Lexers for Oberon family languages¶

+
+
+class pygments.lexers.oberon.ComponentPascalLexer¶
+
+
Short names
+

componentpascal, cp

+
+
Filenames
+

*.cp, *.cps

+
+
MIME types
+

text/x-component-pascal

+
+
+

For Component Pascal source code.

+
+

New in version 2.1.

+
+
+ +
+
+

Lexers for Objective-C family languages¶

+
+
+class pygments.lexers.objective.LogosLexer¶
+
+
Short names
+

logos

+
+
Filenames
+

*.x, *.xi, *.xm, *.xmi

+
+
MIME types
+

text/x-logos

+
+
+

For Logos + Objective-C source code with preprocessor directives.

+
+

New in version 1.6.

+
+
+ +
+
+class pygments.lexers.objective.ObjectiveCLexer¶
+
+
Short names
+

objective-c, objectivec, obj-c, objc

+
+
Filenames
+

*.m, *.h

+
+
MIME types
+

text/x-objective-c

+
+
+

For Objective-C source code with preprocessor directives.

+
+ +
+
+class pygments.lexers.objective.ObjectiveCppLexer¶
+
+
Short names
+

objective-c++, objectivec++, obj-c++, objc++

+
+
Filenames
+

*.mm, *.hh

+
+
MIME types
+

text/x-objective-c++

+
+
+

For Objective-C++ source code with preprocessor directives.

+
+ +
+
+class pygments.lexers.objective.SwiftLexer¶
+
+
Short names
+

swift

+
+
Filenames
+

*.swift

+
+
MIME types
+

text/x-swift

+
+
+

For Swift source.

+
+

New in version 2.0.

+
+
+ +
+
+

Lexers for the Ooc language¶

+
+
+class pygments.lexers.ooc.OocLexer¶
+
+
Short names
+

ooc

+
+
Filenames
+

*.ooc

+
+
MIME types
+

text/x-ooc

+
+
+

For Ooc source code.

+
+

New in version 1.2.

+
+
+ +
+
+

Lexer for ParaSail¶

+
+
+class pygments.lexers.parasail.ParaSailLexer¶
+
+
Short names
+

parasail

+
+
Filenames
+

*.psi, *.psl

+
+
MIME types
+

text/x-parasail

+
+
+

For ParaSail source code.

+
+

New in version 2.1.

+
+
+ +
+
+

Lexers for parser generators¶

+
+
+class pygments.lexers.parsers.AntlrActionScriptLexer¶
+
+
Short names
+

antlr-as, antlr-actionscript

+
+
Filenames
+

*.G, *.g

+
+
MIME types
+

None

+
+
+

ANTLR with ActionScript Target

+
+

New in version 1.1.

+
+
+ +
+
+class pygments.lexers.parsers.AntlrCSharpLexer¶
+
+
Short names
+

antlr-csharp, antlr-c#

+
+
Filenames
+

*.G, *.g

+
+
MIME types
+

None

+
+
+

ANTLR with C# Target

+
+

New in version 1.1.

+
+
+ +
+
+class pygments.lexers.parsers.AntlrCppLexer¶
+
+
Short names
+

antlr-cpp

+
+
Filenames
+

*.G, *.g

+
+
MIME types
+

None

+
+
+

ANTLR with CPP Target

+
+

New in version 1.1.

+
+
+ +
+
+class pygments.lexers.parsers.AntlrJavaLexer¶
+
+
Short names
+

antlr-java

+
+
Filenames
+

*.G, *.g

+
+
MIME types
+

None

+
+
+

ANTLR with Java Target

+
+

New in version 1.1.

+
+
+ +
+
+class pygments.lexers.parsers.AntlrLexer¶
+
+
Short names
+

antlr

+
+
Filenames
+

None

+
+
MIME types
+

None

+
+
+

Generic ANTLR Lexer. Should not be called directly; instead, use the DelegatingLexer for your target language.

+
+

New in version 1.1.

+
+
+ +
+
+class pygments.lexers.parsers.AntlrObjectiveCLexer¶
+
+
Short names
+

antlr-objc

+
+
Filenames
+

*.G, *.g

+
+
MIME types
+

None

+
+
+

ANTLR with Objective-C Target

+
+

New in version 1.1.

+
+
+ +
+
+class pygments.lexers.parsers.AntlrPerlLexer¶
+
+
Short names
+

antlr-perl

+
+
Filenames
+

*.G, *.g

+
+
MIME types
+

None

+
+
+

ANTLR with Perl Target

+
+

New in version 1.1.

+
+
+ +
+
+class pygments.lexers.parsers.AntlrPythonLexer¶
+
+
Short names
+

antlr-python

+
+
Filenames
+

*.G, *.g

+
+
MIME types
+

None

+
+
+

ANTLR with Python Target

+
+

New in version 1.1.

+
+
+ +
+
+class pygments.lexers.parsers.AntlrRubyLexer¶
+
+
Short names
+

antlr-ruby, antlr-rb

+
+
Filenames
+

*.G, *.g

+
+
MIME types
+

None

+
+
+

ANTLR with Ruby Target

+
+

New in version 1.1.

+
+
+ +
+
+class pygments.lexers.parsers.EbnfLexer¶
+
+
Short names
+

ebnf

+
+
Filenames
+

*.ebnf

+
+
MIME types
+

text/x-ebnf

+
+
+

Lexer for ISO/IEC 14977 EBNF grammars.

+
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.parsers.RagelCLexer¶
+
+
Short names
+

ragel-c

+
+
Filenames
+

*.rl

+
+
MIME types
+

None

+
+
+

A lexer for Ragel in a C host file.

+
+

New in version 1.1.

+
+
+ +
+
+class pygments.lexers.parsers.RagelCppLexer¶
+
+
Short names
+

ragel-cpp

+
+
Filenames
+

*.rl

+
+
MIME types
+

None

+
+
+

A lexer for Ragel in a CPP host file.

+
+

New in version 1.1.

+
+
+ +
+
+class pygments.lexers.parsers.RagelDLexer¶
+
+
Short names
+

ragel-d

+
+
Filenames
+

*.rl

+
+
MIME types
+

None

+
+
+

A lexer for Ragel in a D host file.

+
+

New in version 1.1.

+
+
+ +
+
+class pygments.lexers.parsers.RagelEmbeddedLexer¶
+
+
Short names
+

ragel-em

+
+
Filenames
+

*.rl

+
+
MIME types
+

None

+
+
+

A lexer for Ragel embedded in a host language file.

+

This will only highlight Ragel statements. If you want host language highlighting, then call the language-specific Ragel lexer.

+
+

New in version 1.1.

+
+
+ +
+
+class pygments.lexers.parsers.RagelJavaLexer¶
+
+
Short names
+

ragel-java

+
+
Filenames
+

*.rl

+
+
MIME types
+

None

+
+
+

A lexer for Ragel in a Java host file.

+
+

New in version 1.1.

+
+
+ +
+
+class pygments.lexers.parsers.RagelLexer¶
+
+
Short names
+

ragel

+
+
Filenames
+

None

+
+
MIME types
+

None

+
+
+

A pure Ragel lexer. Use this for fragments of Ragel. For .rl files, use RagelEmbeddedLexer instead (or one of the language-specific subclasses).

+
+

New in version 1.1.

+
+
+ +
+
+class pygments.lexers.parsers.RagelObjectiveCLexer¶
+
+
Short names
+

ragel-objc

+
+
Filenames
+

*.rl

+
+
MIME types
+

None

+
+
+

A lexer for Ragel in an Objective C host file.

+
+

New in version 1.1.

+
+
+ +
+
+class pygments.lexers.parsers.RagelRubyLexer¶
+
+
Short names
+

ragel-ruby, ragel-rb

+
+
Filenames
+

*.rl

+
+
MIME types
+

None

+
+
+

A lexer for Ragel in a Ruby host file.

+
+

New in version 1.1.

+
+
+ +
+
+class pygments.lexers.parsers.TreetopLexer¶
+
+
Short names
+

treetop

+
+
Filenames
+

*.treetop, *.tt

+
+
MIME types
+

None

+
+
+

A lexer for Treetop grammars.

+
+

New in version 1.6.

+
+
+ +
+
+

Lexers for Pascal family languages¶

+
+
+class pygments.lexers.pascal.AdaLexer¶
+
+
Short names
+

ada, ada95, ada2005

+
+
Filenames
+

*.adb, *.ads, *.ada

+
+
MIME types
+

text/x-ada

+
+
+

For Ada source code.

+
+

New in version 1.3.

+
+
+ +
+
+class pygments.lexers.pascal.DelphiLexer¶
+
+
Short names
+

delphi, pas, pascal, objectpascal

+
+
Filenames
+

*.pas, *.dpr

+
+
MIME types
+

text/x-pascal

+
+
+

For Delphi (Borland Object Pascal), Turbo Pascal and Free Pascal source code.

+

Additional options accepted:

+
+
turbopascal

Highlight Turbo Pascal specific keywords (default: True).

+
+
delphi

Highlight Borland Delphi specific keywords (default: True).

+
+
freepascal

Highlight Free Pascal specific keywords (default: True).

+
+
units

A list of units that should be considered builtin; supported are System, SysUtils, Classes and Math. The default is to consider all of them builtin.

+
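An illustrative sketch (unit and flag values chosen for the example) restricting the builtin units and disabling Free Pascal keywords:

>>> from pygments.lexers.pascal import DelphiLexer
>>> lexer = DelphiLexer(units=['System', 'SysUtils'], freepascal=False)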
+
+
+ +
+
+

Lexers for the Pawn languages¶

+
+
+class pygments.lexers.pawn.PawnLexer¶
+
+
Short names
+

pawn

+
+
Filenames
+

*.p, *.pwn, *.inc

+
+
MIME types
+

text/x-pawn

+
+
+

For Pawn source code.

+
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.pawn.SourcePawnLexer¶
+
+
Short names
+

sp

+
+
Filenames
+

*.sp

+
+
MIME types
+

text/x-sourcepawn

+
+
+

For SourcePawn source code with preprocessor directives.

+
+

New in version 1.6.

+
+
+ +
+ + + +
+

Lexer for Praat¶

+
+
+class pygments.lexers.praat.PraatLexer¶
+
+
Short names
+

praat

+
+
Filenames
+

*.praat, *.proc, *.psc

+
+
MIME types
+

None

+
+
+

For Praat scripts.

+
+

New in version 2.1.

+
+
+ +
+
+

Lexers for Prolog and Prolog-like languages¶

+
+
+class pygments.lexers.prolog.LogtalkLexer¶
+
+
Short names
+

logtalk

+
+
Filenames
+

*.lgt, *.logtalk

+
+
MIME types
+

text/x-logtalk

+
+
+

For Logtalk source code.

+
+

New in version 0.10.

+
+
+ +
+
+class pygments.lexers.prolog.PrologLexer¶
+
+
Short names
+

prolog

+
+
Filenames
+

*.ecl, *.prolog, *.pro, *.pl

+
+
MIME types
+

text/x-prolog

+
+
+

Lexer for Prolog files.

+
+ +
+ +
+

Lexer for QVT Operational language¶

+
+
+class pygments.lexers.qvt.QVToLexer¶
+
+
Short names
+

qvto, qvt

+
+
Filenames
+

*.qvto

+
+
MIME types
+

None

+
+
+

For the QVT Operational Mapping language.

+

Reference for implementing this: «Meta Object Facility (MOF) 2.0 Query/View/Transformation Specification», Version 1.1 - January 2011 (http://www.omg.org/spec/QVT/1.1/), see §8.4, «Concrete Syntax» in particular.

+

Notable token assignments:

  • Name.Class is assigned to the identifier following any of the following keywords: metamodel, class, exception, primitive, enum, transformation or library

  • Name.Function is assigned to the names of mappings and queries

  • Name.Builtin.Pseudo is assigned to the pre-defined variables ‘this’, ‘self’ and ‘result’.
+ +
+
+

Lexers for the R/S languages¶

+
+
+class pygments.lexers.r.RConsoleLexer¶
+
+
Short names
+

rconsole, rout

+
+
Filenames
+

*.Rout

+
+
MIME types
+

None

+
+
+

For R console transcripts or R CMD BATCH output files.

+
+ +
+
+class pygments.lexers.r.RdLexer¶
+
+
Short names
+

rd

+
+
Filenames
+

*.Rd

+
+
MIME types
+

text/x-r-doc

+
+
+

Pygments Lexer for R documentation (Rd) files

+

This is a very minimal implementation, highlighting little more than the macros. A description of Rd syntax is found in Writing R Extensions and Parsing Rd files.

+
+

New in version 1.6.

+
+
+ +
+
+class pygments.lexers.r.SLexer¶
+
+
Short names
+

splus, s, r

+
+
Filenames
+

*.S, *.R, .Rhistory, .Rprofile, .Renviron

+
+
MIME types
+

text/S-plus, text/S, text/x-r-source, text/x-r, text/x-R, text/x-r-history, text/x-r-profile

+
+
+

For S, S-plus, and R source code.

+
+

New in version 0.10.

+
+
+ +
+
+

Lexers for semantic web and RDF query languages and markup¶

+
+
+class pygments.lexers.rdf.ShExCLexer¶
+
+
Short names
+

shexc, shex

+
+
Filenames
+

*.shex

+
+
MIME types
+

text/shex

+
+
+

Lexer for ShExC shape expressions language syntax.

+
+ +
+
+class pygments.lexers.rdf.SparqlLexer¶
+
+
Short names
+

sparql

+
+
Filenames
+

*.rq, *.sparql

+
+
MIME types
+

application/sparql-query

+
+
+

Lexer for SPARQL query language.

+
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.rdf.TurtleLexer¶
+
+
Short names
+

turtle

+
+
Filenames
+

*.ttl

+
+
MIME types
+

text/turtle, application/x-turtle

+
+
+

Lexer for Turtle data language.

+
+

New in version 2.1.

+
+
+ +
+ +
+

Lexer for resource definition files¶

+
+
+class pygments.lexers.resource.ResourceLexer¶
+
+
Short names
+

resource, resourcebundle

+
+
Filenames
+

None

+
+
MIME types
+

None

+
+
+

Lexer for ICU Resource bundles.

+
+

New in version 2.0.

+
+
+ +
+
+

Lexer for Relax-NG Compact syntax¶

+
+
+class pygments.lexers.rnc.RNCCompactLexer¶
+
+
Short names
+

rnc, rng-compact

+
+
Filenames
+

*.rnc

+
+
MIME types
+

None

+
+
+

For RelaxNG-compact syntax.

+
+

New in version 2.2.

+
+
+ +
+
+

Lexers for Roboconf DSL¶

+
+
+class pygments.lexers.roboconf.RoboconfGraphLexer¶
+
+
Short names
+

roboconf-graph

+
+
Filenames
+

*.graph

+
+
MIME types
+

None

+
+
+

Lexer for Roboconf graph files.

+
+

New in version 2.1.

+
+
+ +
+
+class pygments.lexers.roboconf.RoboconfInstancesLexer¶
+
+
Short names
+

roboconf-instances

+
+
Filenames
+

*.instances

+
+
MIME types
+

None

+
+
+

Lexer for Roboconf instances files.

+
+

New in version 2.1.

+
+
+ +
+
+

Lexer for Robot Framework¶

+
+
+class pygments.lexers.robotframework.RobotFrameworkLexer¶
+
+
Short names
+

robotframework

+
+
Filenames
+

*.robot

+
+
MIME types
+

text/x-robotframework

+
+
+

For Robot Framework test data.

+

Supports both space and pipe separated plain text formats.

+
+

New in version 1.6.

+
+
+ +
+ +
+

Lexers for the Rust language¶

+
+
+class pygments.lexers.rust.RustLexer¶
+
+
Short names
+

rust, rs

+
+
Filenames
+

*.rs, *.rs.in

+
+
MIME types
+

text/rust

+
+
+

Lexer for the Rust programming language (version 1.10).

+
+

New in version 1.6.

+
+
+ +
+
+

Lexer for SAS¶

+
+
+class pygments.lexers.sas.SASLexer¶
+
+
Short names
+

sas

+
+
Filenames
+

*.SAS, *.sas

+
+
MIME types
+

text/x-sas, text/sas, application/x-sas

+
+
+

For SAS files.

+
+

New in version 2.2.

+
+
+ +
+
+

Lexer for scdoc, a simple man page generator¶

+
+
+class pygments.lexers.scdoc.ScdocLexer¶
+
+
Short names
+

scdoc, scd

+
+
Filenames
+

*.scd, *.scdoc

+
+
MIME types
+

None

+
+
+

scdoc is a simple man page generator for POSIX systems written in C99. https://git.sr.ht/~sircmpwn/scdoc

+
+

New in version 2.5.

+
+
+ +
+
+

Lexer for scripting and embedded languages¶

+
+
+class pygments.lexers.scripting.AppleScriptLexer¶
+
+
Short names
+

applescript

+
+
Filenames
+

*.applescript

+
+
MIME types
+

None

+
+
+

For AppleScript source code, including AppleScript Studio. Contributed by Andreas Amann <aamann@mac.com>.

+
+

New in version 1.0.

+
+
+ +
+
+class pygments.lexers.scripting.ChaiscriptLexer¶
+
+
Short names
+

chai, chaiscript

+
+
Filenames
+

*.chai

+
+
MIME types
+

text/x-chaiscript, application/x-chaiscript

+
+
+

For ChaiScript source code.

+
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.scripting.EasytrieveLexer¶
+
+
Short names
+

easytrieve

+
+
Filenames
+

*.ezt, *.mac

+
+
MIME types
+

text/x-easytrieve

+
+
+

Easytrieve Plus is a programming language for extracting, filtering and converting sequential data. Furthermore, it can lay out data for reports. It is mainly used on mainframe platforms and can access several of the mainframe’s native file formats. It is somewhat comparable to awk.

+
+

New in version 2.1.

+
+
+ +
+
+class pygments.lexers.scripting.HybrisLexer¶
+
+
Short names
+

hybris, hy

+
+
Filenames
+

*.hy, *.hyb

+
+
MIME types
+

text/x-hybris, application/x-hybris

+
+
+

For Hybris source code.

+
+

New in version 1.4.

+
+
+ +
+
+class pygments.lexers.scripting.JclLexer¶
+
+
Short names
+

jcl

+
+
Filenames
+

*.jcl

+
+
MIME types
+

text/x-jcl

+
+
+

Job Control Language (JCL) is a scripting language used on mainframe platforms to instruct the system on how to run a batch job or start a subsystem. It is somewhat comparable to MS DOS batch and Unix shell scripts.

+
+

New in version 2.1.

+
+
+ +
+
+class pygments.lexers.scripting.LSLLexer¶
+
+
Short names
+

lsl

+
+
Filenames
+

*.lsl

+
+
MIME types
+

text/x-lsl

+
+
+

For Second Life’s Linden Scripting Language source code.

+
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.scripting.LuaLexer¶
+
+
Short names
+

lua

+
+
Filenames
+

*.lua, *.wlua

+
+
MIME types
+

text/x-lua, application/x-lua

+
+
+

For Lua source code.

+

Additional options accepted:

+
+
func_name_highlighting

If given and True, highlight builtin function names (default: True).

+
+
disabled_modules

If given, must be a list of module names whose function names should not be highlighted. By default all modules are highlighted.

+

To get a list of allowed modules, have a look at the _lua_builtins module:

+
>>> from pygments.lexers._lua_builtins import MODULES
>>> MODULES.keys()
['string', 'coroutine', 'modules', 'io', 'basic', ...]
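A sketch combining both options (module names taken from the listing above):

>>> from pygments.lexers.scripting import LuaLexer
>>> lexer = LuaLexer(func_name_highlighting=True,
...                  disabled_modules=['io', 'coroutine'])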
+
+
+
+
+ +
+
+class pygments.lexers.scripting.MOOCodeLexer¶
+
+
Short names
+

moocode, moo

+
+
Filenames
+

*.moo

+
+
MIME types
+

text/x-moocode

+
+
+

For MOOCode (the MOO scripting language).

+
+

New in version 0.9.

+
+
+ +
+
+class pygments.lexers.scripting.MoonScriptLexer¶
+
+
Short names
+

moon, moonscript

+
+
Filenames
+

*.moon

+
+
MIME types
+

text/x-moonscript, application/x-moonscript

+
+
+

For MoonScript source code.

+
+

New in version 1.5.

+
+
+ +
+
+class pygments.lexers.scripting.RexxLexer¶
+
+
Short names
+

rexx, arexx

+
+
Filenames
+

*.rexx, *.rex, *.rx, *.arexx

+
+
MIME types
+

text/x-rexx

+
+
+

Rexx is a scripting language available for a wide range of different platforms, with its roots found on mainframe systems. It is popular for I/O- and data-based tasks and can act as a glue language to bind different applications together.

+
+

New in version 2.0.

+
+
+ +
+
+

Lexer for Smart Game Format (sgf) file format¶

+
+
+class pygments.lexers.sgf.SmartGameFormatLexer¶
+
+
Short names
+

sgf

+
+
Filenames
+

*.sgf

+
+
MIME types
+

None

+
+
+

Lexer for Smart Game Format (sgf) file format.

+

The format is used to store game records of board games for two players (mainly the game of Go). For more information about the definition of the format, see: https://www.red-bean.com/sgf/

+
+

New in version 2.4.

+
+
+ +
+
+

Lexers for various shells¶

+
+
+class pygments.lexers.shell.BashLexer¶
+
+
Short names
+

bash, sh, ksh, zsh, shell

+
+
Filenames
+

*.sh, *.ksh, *.bash, *.ebuild, *.eclass, *.exheres-0, *.exlib, *.zsh, .bashrc, bashrc, .bash\*, bash\*, zshrc, .zshrc, PKGBUILD

+
+
MIME types
+

application/x-sh, application/x-shellscript, text/x-shellscript

+
+
+

Lexer for (ba|k|z|)sh shell scripts.

+
+

New in version 0.6.

+
+
+ +
+
+class pygments.lexers.shell.BashSessionLexer¶
+
+
Short names
+

console, shell-session

+
+
Filenames
+

*.sh-session, *.shell-session

+
+
MIME types
+

application/x-shell-session, application/x-sh-session

+
+
+

Lexer for simplistic shell sessions.

+
+

New in version 1.1.

+
+
+ +
+
+class pygments.lexers.shell.BatchLexer¶
+
+
Short names
+

bat, batch, dosbatch, winbatch

+
+
Filenames
+

*.bat, *.cmd

+
+
MIME types
+

application/x-dos-batch

+
+
+

Lexer for the DOS/Windows Batch file format.

+
+

New in version 0.7.

+
+
+ +
+
+class pygments.lexers.shell.FishShellLexer¶
+
+
Short names
+

fish, fishshell

+
+
Filenames
+

*.fish, *.load

+
+
MIME types
+

application/x-fish

+
+
+

Lexer for Fish shell scripts.

+
+

New in version 2.1.

+
+
+ +
+
+class pygments.lexers.shell.MSDOSSessionLexer¶
+
+
Short names
+

doscon

+
+
Filenames
+

None

+
+
MIME types
+

None

+
+
+

Lexer for simplistic MSDOS sessions.

+
+

New in version 2.1.

+
+
+ +
+
+class pygments.lexers.shell.PowerShellLexer¶
+
+
Short names
+

powershell, posh, ps1, psm1

+
+
Filenames
+

*.ps1, *.psm1

+
+
MIME types
+

text/x-powershell

+
+
+

For Windows PowerShell code.

+
+

New in version 1.5.

+
+
+ +
+
+class pygments.lexers.shell.PowerShellSessionLexer¶
+
+
Short names
+

ps1con

+
+
Filenames
+

None

+
+
MIME types
+

None

+
+
+

Lexer for simplistic Windows PowerShell sessions.

+
+

New in version 2.1.

+
+
+ +
+
+class pygments.lexers.shell.SlurmBashLexer¶
+
+
Short names
+

slurm, sbatch

+
+
Filenames
+

*.sl

+
+
MIME types
+

None

+
+
+

Lexer for (ba|k|z|)sh Slurm scripts.

+
+

New in version 2.4.

+
+
+ +
+
+class pygments.lexers.shell.TcshLexer¶
+
+
Short names
+

tcsh, csh

+
+
Filenames
+

*.tcsh, *.csh

+
+
MIME types
+

application/x-csh

+
+
+

Lexer for tcsh scripts.

+
+

New in version 0.10.

+
+
+ +
+
+class pygments.lexers.shell.TcshSessionLexer¶
+
+
Short names
+

tcshcon

+
+
Filenames
+

None

+
+
MIME types
+

None

+
+
+

Lexer for Tcsh sessions.

+
+

New in version 2.1.

+
+
+ +
+
+

Lexer for the Slash programming language¶

+
+
+class pygments.lexers.slash.SlashLexer¶
+
+
Short names
+

slash

+
+
Filenames
+

*.sl

+
+
MIME types
+

None

+
+
+

Lexer for the Slash programming language.

+
+

New in version 2.4.

+
+
+ +
+ +
+

Lexers for the SMV languages¶

+
+
+class pygments.lexers.smv.NuSMVLexer¶
+
+
Short names
+

nusmv

+
+
Filenames
+

*.smv

+
+
MIME types
+

None

+
+
+

Lexer for the NuSMV language.

+
+

New in version 2.2.

+
+
+ +
+
+

Lexers for the SNOBOL language¶

+
+
+class pygments.lexers.snobol.SnobolLexer¶
+
+
Short names
+

snobol

+
+
Filenames
+

*.snobol

+
+
MIME types
+

text/x-snobol

+
+
+

Lexer for the SNOBOL4 programming language.

+

Recognizes the common ASCII equivalents of the original SNOBOL4 operators. Does not require spaces around binary operators.

+
+

New in version 1.5.

+
+
+ +
+
+

Lexers for Solidity¶

+
+
+class pygments.lexers.solidity.SolidityLexer¶
+
+
Short names
+

solidity

+
+
Filenames
+

*.sol

+
+
MIME types
+

None

+
+
+

For Solidity source code.

+
+

New in version 2.5.

+
+
+ +
+
+

Special lexers¶

+
+
+class pygments.lexers.special.RawTokenLexer¶
+
+
Short names
+

raw

+
+
Filenames
+

None

+
+
MIME types
+

application/x-pygments-tokens

+
+
+

Recreate a token stream formatted with the RawTokenFormatter. This lexer raises exceptions during parsing if the token stream in the file is malformed.

+

Additional options accepted:

+
+
compress

If set to "gz" or "bz2", decompress the token stream with the given compression algorithm before lexing (default: "").

+
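A construction sketch; the input would then be a gzip-compressed dump produced by the RawTokenFormatter:

>>> from pygments.lexers.special import RawTokenLexer
>>> lexer = RawTokenLexer(compress='gz')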
+
+
+ +
+
+class pygments.lexers.special.TextLexer¶
+
+
Short names
+

text

+
+
Filenames
+

*.txt

+
+
MIME types
+

text/plain

+
+
+

“Null” lexer, doesn’t highlight anything.

+
+ +
+ +
+

Lexer for Stata¶

+
+
+class pygments.lexers.stata.StataLexer¶
+
+
Short names
+

stata, do

+
+
Filenames
+

*.do, *.ado

+
+
MIME types
+

text/x-stata, text/stata, application/x-stata

+
+
+

For Stata do files.

+
+

New in version 2.2.

+
+
+ +
+
+

Lexer for SuperCollider¶

+
+
+class pygments.lexers.supercollider.SuperColliderLexer¶
+
+
Short names
+

sc, supercollider

+
+
Filenames
+

*.sc, *.scd

+
+
MIME types
+

application/supercollider, text/supercollider

+
+
+

For SuperCollider source code.

+
+

New in version 2.1.

+
+
+ +
+ +
+

Lexers for various template engines’ markup¶

+
+
+class pygments.lexers.templates.Angular2HtmlLexer¶
+
+
Short names
+

html+ng2

+
+
Filenames
+

*.ng2

+
+
MIME types
+

None

+
+
+

Subclass of the Angular2Lexer that highlights unlexed data with the HtmlLexer.

+
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.templates.Angular2Lexer¶
+
+
Short names
+

ng2

+
+
Filenames
+

None

+
+
MIME types
+

None

+
+
+

Generic angular2 template lexer.

+

Highlights only the Angular template tags (stuff between {{ and }} and special attributes: ‘(event)=’, ‘[property]=’, ‘[(twoWayBinding)]=’). Everything else is left for a delegating lexer.

+
+

New in version 2.1.

+
+
+ +
+
+class pygments.lexers.templates.CheetahHtmlLexer¶
+
+
Short names
+

html+cheetah, html+spitfire, htmlcheetah

+
+
Filenames
+

None

+
+
MIME types
+

text/html+cheetah, text/html+spitfire

+
+
+

Subclass of the CheetahLexer that highlights unlexed data with the HtmlLexer.

+
+ +
+
+class pygments.lexers.templates.CheetahJavascriptLexer¶
+
+
Short names
+

js+cheetah, javascript+cheetah, js+spitfire, javascript+spitfire

+
+
Filenames
+

None

+
+
MIME types
+

application/x-javascript+cheetah, text/x-javascript+cheetah, text/javascript+cheetah, application/x-javascript+spitfire, text/x-javascript+spitfire, text/javascript+spitfire

+
+
+

Subclass of the CheetahLexer that highlights unlexed data with the JavascriptLexer.

+
+ +
+
+class pygments.lexers.templates.CheetahLexer¶
+
+
Short names
+

cheetah, spitfire

+
+
Filenames
+

*.tmpl, *.spt

+
+
MIME types
+

application/x-cheetah, application/x-spitfire

+
+
+

Generic cheetah templates lexer. Code that isn’t Cheetah markup is yielded as Token.Other. This also works for spitfire templates which use the same syntax.

+
+ +
+
+class pygments.lexers.templates.CheetahXmlLexer¶
+
+
Short names
+

xml+cheetah, xml+spitfire

+
+
Filenames
+

None

+
+
MIME types
+

application/xml+cheetah, application/xml+spitfire

+
+
+

Subclass of the CheetahLexer that highlights unlexed data with the XmlLexer.

+
+ +
+
+class pygments.lexers.templates.ColdfusionCFCLexer¶
+
+
Short names
+

cfc

+
+
Filenames
+

*.cfc

+
+
MIME types
+

None

+
+
+

Coldfusion markup/script components

+
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.templates.ColdfusionHtmlLexer¶
+
+
Short names
+

cfm

+
+
Filenames
+

*.cfm, *.cfml

+
+
MIME types
+

application/x-coldfusion

+
+
+

Coldfusion markup in html

+
+ +
+
+class pygments.lexers.templates.ColdfusionLexer¶
+
+
Short names
+

cfs

+
+
Filenames
+

None

+
+
MIME types
+

None

+
+
+

Coldfusion statements

+
+ +
+
+class pygments.lexers.templates.CssDjangoLexer¶
+
+
Short names
+

css+django, css+jinja

+
+
Filenames
+

None

+
+
MIME types
+

text/css+django, text/css+jinja

+
+
+

Subclass of the DjangoLexer that highlights unlexed data with the CssLexer.

+
+ +
+
+class pygments.lexers.templates.CssErbLexer¶
+
+
Short names
+

css+erb, css+ruby

+
+
Filenames
+

None

+
+
MIME types
+

text/css+ruby

+
+
+

Subclass of ErbLexer which highlights unlexed data with the CssLexer.

+
+ +
+
+class pygments.lexers.templates.CssGenshiLexer¶
+
+
Short names
+

css+genshitext, css+genshi

+
+
Filenames
+

None

+
+
MIME types
+

text/css+genshi

+
+
+

A lexer that highlights CSS definitions in genshi text templates.

+
+ +
+
+class pygments.lexers.templates.CssPhpLexer¶
+
+
Short names
+

css+php

+
+
Filenames
+

None

+
+
MIME types
+

text/css+php

+
+
+

Subclass of PhpLexer which highlights unmatched data with the CssLexer.

+
+ +
+
+class pygments.lexers.templates.CssSmartyLexer¶
+
+
Short names
+

css+smarty

+
+
Filenames
+

None

+
+
MIME types
+

text/css+smarty

+
+
+

Subclass of the SmartyLexer that highlights unlexed data with the CssLexer.

+
+ +
+
+class pygments.lexers.templates.DjangoLexer¶
+
+
Short names
+

django, jinja

+
+
Filenames
+

None

+
+
MIME types
+

application/x-django-templating, application/x-jinja

+
+
+

Generic django and jinja template lexer.

+

It just highlights django/jinja code between the preprocessor directives, other data is left untouched by the lexer.

+
+ +
+
+class pygments.lexers.templates.ErbLexer¶
+
+
Short names
+

erb

+
+
Filenames
+

None

+
+
MIME types
+

application/x-ruby-templating

+
+
+

Generic ERB (Ruby Templating) lexer.

+

Just highlights ruby code between the preprocessor directives, other data is left untouched by the lexer.

+

All options are also forwarded to the RubyLexer.

+
+ +
+
+class pygments.lexers.templates.EvoqueHtmlLexer¶
+
+
Short names
+

html+evoque

+
+
Filenames
+

*.html

+
+
MIME types
+

text/html+evoque

+
+
+

Subclass of the EvoqueLexer that highlights unlexed data with the HtmlLexer.

+
+

New in version 1.1.

+
+
+ +
+
+class pygments.lexers.templates.EvoqueLexer¶
+
+
Short names
+

evoque

+
+
Filenames
+

*.evoque

+
+
MIME types
+

application/x-evoque

+
+
+

For files using the Evoque templating system.

+
+

New in version 1.1.

+
+
+ +
+
+class pygments.lexers.templates.EvoqueXmlLexer¶
+
+
Short names
+

xml+evoque

+
+
Filenames
+

*.xml

+
+
MIME types
+

application/xml+evoque

+
+
+

Subclass of the EvoqueLexer that highlights unlexed data with the XmlLexer.

+
+

New in version 1.1.

+
+
+ +
+
+class pygments.lexers.templates.GenshiLexer¶
+
+
Short names
+

genshi, kid, xml+genshi, xml+kid

+
+
Filenames
+

*.kid

+
+
MIME types
+

application/x-genshi, application/x-kid

+
+
+

A lexer that highlights genshi and kid XML templates.

+
+ +
+
+class pygments.lexers.templates.GenshiTextLexer¶
+
+
Short names
+

genshitext

+
+
Filenames
+

None

+
+
MIME types
+

application/x-genshi-text, text/x-genshi

+
+
+

A lexer that highlights genshi text templates.

+
+ +
+
+class pygments.lexers.templates.HandlebarsHtmlLexer¶
+
+
Short names
+

html+handlebars

+
+
Filenames
+

*.handlebars, *.hbs

+
+
MIME types
+

text/html+handlebars, text/x-handlebars-template

+
+
+

Subclass of the HandlebarsLexer that highlights unlexed data with the HtmlLexer.

+
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.templates.HandlebarsLexer¶
+
+
Short names
+

handlebars

+
+
Filenames
+

None

+
+
MIME types
+

None

+
+
+

Generic handlebars <http://handlebarsjs.com/> template lexer.

+

Highlights only the Handlebars template tags (stuff between {{ and }}). Everything else is left for a delegating lexer.

+
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.templates.HtmlDjangoLexer¶
+
+
Short names
+

html+django, html+jinja, htmldjango

+
+
Filenames
+

None

+
+
MIME types
+

text/html+django, text/html+jinja

+
+
+

Subclass of the DjangoLexer that highlights unlexed data with the HtmlLexer.

+

Nested Javascript and CSS is highlighted too.

+
+ +
+
+class pygments.lexers.templates.HtmlGenshiLexer¶
+
+
Short names
+

html+genshi, html+kid

+
+
Filenames
+

None

+
+
MIME types
+

text/html+genshi

+
+
+

A lexer that highlights genshi and kid HTML templates.

+
+ +
+
+class pygments.lexers.templates.HtmlPhpLexer¶
+
+
Short names
+

html+php

+
+
Filenames
+

*.phtml

+
+
MIME types
+

application/x-php, application/x-httpd-php, application/x-httpd-php3, application/x-httpd-php4, application/x-httpd-php5

+
+
+

Subclass of PhpLexer that highlights unhandled data with the HtmlLexer.

+

Nested Javascript and CSS is highlighted too.

+
+ +
+
+class pygments.lexers.templates.HtmlSmartyLexer¶
+
+
Short names
+

html+smarty

+
+
Filenames
+

None

+
+
MIME types
+

text/html+smarty

+
+
+

Subclass of the SmartyLexer that highlights unlexed data with the HtmlLexer.

+

Nested Javascript and CSS is highlighted too.

+
+ +
+
+class pygments.lexers.templates.JavascriptDjangoLexer¶
+
+
Short names
+

js+django, javascript+django, js+jinja, javascript+jinja

+
+
Filenames
+

None

+
+
MIME types
+

application/x-javascript+django, application/x-javascript+jinja, text/x-javascript+django, text/x-javascript+jinja, text/javascript+django, text/javascript+jinja

+
+
+

Subclass of the DjangoLexer that highlights unlexed data with the JavascriptLexer.

+
+ +
+
+class pygments.lexers.templates.JavascriptErbLexer¶
+
+
Short names
+

js+erb, javascript+erb, js+ruby, javascript+ruby

+
+
Filenames
+

None

+
+
MIME types
+

application/x-javascript+ruby, text/x-javascript+ruby, text/javascript+ruby

+
+
+

Subclass of ErbLexer which highlights unlexed data with the JavascriptLexer.

+
+ +
+
+class pygments.lexers.templates.JavascriptGenshiLexer¶
+
+
Short names
+

js+genshitext, js+genshi, javascript+genshitext, javascript+genshi

+
+
Filenames
+

None

+
+
MIME types
+

application/x-javascript+genshi, text/x-javascript+genshi, text/javascript+genshi

+
+
+

A lexer that highlights javascript code in genshi text templates.

+
+ +
+
+class pygments.lexers.templates.JavascriptPhpLexer¶
+
+
Short names
+

js+php, javascript+php

+
+
Filenames
+

None

+
+
MIME types
+

application/x-javascript+php, text/x-javascript+php, text/javascript+php

+
+
+

Subclass of PhpLexer which highlights unmatched data with the JavascriptLexer.

+
+ +
+
+class pygments.lexers.templates.JavascriptSmartyLexer¶
+
+
Short names
+

js+smarty, javascript+smarty

+
+
Filenames
+

None

+
+
MIME types
+

application/x-javascript+smarty, text/x-javascript+smarty, text/javascript+smarty

+
+
+

Subclass of the SmartyLexer that highlights unlexed data with the JavascriptLexer.

+
+ +
+
+class pygments.lexers.templates.JspLexer¶
+
+
Short names
+

jsp

+
+
Filenames
+

*.jsp

+
+
MIME types
+

application/x-jsp

+
+
+

Lexer for Java Server Pages.

+
+

New in version 0.7.

+
+
+ +
+
+class pygments.lexers.templates.LassoCssLexer¶
+
+
Short names
+

css+lasso

+
+
Filenames
+

None

+
+
MIME types
+

text/css+lasso

+
+
+

Subclass of the LassoLexer which highlights unhandled data with the CssLexer.

+
+

New in version 1.6.

+
+
+ +
+
+class pygments.lexers.templates.LassoHtmlLexer¶
+
+
Short names
+

html+lasso

+
+
Filenames
+

None

+
+
MIME types
+

text/html+lasso, application/x-httpd-lasso, application/x-httpd-lasso[89]

+
+
+

Subclass of the LassoLexer which highlights unhandled data with the HtmlLexer.

+

Nested JavaScript and CSS is also highlighted.

+
+

New in version 1.6.

+
+
+ +
+
+class pygments.lexers.templates.LassoJavascriptLexer¶
+
+
Short names
+

js+lasso, javascript+lasso

+
+
Filenames
+

None

+
+
MIME types
+

application/x-javascript+lasso, text/x-javascript+lasso, text/javascript+lasso

+
+
+

Subclass of the LassoLexer which highlights unhandled data with the JavascriptLexer.

+
+

New in version 1.6.

+
+
+ +
+
+class pygments.lexers.templates.LassoXmlLexer¶
+
+
Short names
+

xml+lasso

+
+
Filenames
+

None

+
+
MIME types
+

application/xml+lasso

+
+
+

Subclass of the LassoLexer which highlights unhandled data with the XmlLexer.

+
+

New in version 1.6.

+
+
+ +
+
+class pygments.lexers.templates.LiquidLexer¶
+
+
Short names
+

liquid

+
+
Filenames
+

*.liquid

+
+
MIME types
+

None

+
+
+

Lexer for Liquid templates.

+
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.templates.MakoCssLexer¶
+
+
Short names
+

css+mako

+
+
Filenames
+

None

+
+
MIME types
+

text/css+mako

+
+
+

Subclass of the MakoLexer that highlights unlexed data with the CssLexer.

+
+

New in version 0.7.

+
+
+ +
+
+class pygments.lexers.templates.MakoHtmlLexer¶
+
+
Short names
+

html+mako

+
+
Filenames
+

None

+
+
MIME types
+

text/html+mako

+
+
+

Subclass of the MakoLexer that highlights unlexed data with the HtmlLexer.

+
+

New in version 0.7.

+
+
+ +
+
+class pygments.lexers.templates.MakoJavascriptLexer¶
+
+
Short names
+

js+mako, javascript+mako

+
+
Filenames
+

None

+
+
MIME types
+

application/x-javascript+mako, text/x-javascript+mako, text/javascript+mako

+
+
+

Subclass of the MakoLexer that highlights unlexed data with the JavascriptLexer.

+
+

New in version 0.7.

+
+
+ +
+
+class pygments.lexers.templates.MakoLexer¶
+
+
Short names
+

mako

+
+
Filenames
+

*.mao

+
+
MIME types
+

application/x-mako

+
+
+

Generic mako templates lexer. Code that isn’t Mako markup is yielded as Token.Other.

+
+

New in version 0.7.

+
+
+ +
+
+class pygments.lexers.templates.MakoXmlLexer¶
+
+
Short names
+

xml+mako

+
+
Filenames
+

None

+
+
MIME types
+

application/xml+mako

+
+
+

Subclass of the MakoLexer that highlights unlexed data with the XmlLexer.

+
+

New in version 0.7.

+
+
+ +
+
+class pygments.lexers.templates.MasonLexer¶
+
+
Short names
+

mason

+
+
Filenames
+

*.m, *.mhtml, *.mc, *.mi, autohandler, dhandler

+
+
MIME types
+

application/x-mason

+
+
+

Generic mason templates lexer. Stolen from Myghty lexer. Code that isn’t Mason markup is HTML.

+
+

New in version 1.4.

+
+
+ +
+
+class pygments.lexers.templates.MyghtyCssLexer¶
+
+
Short names
+

css+myghty

+
+
Filenames
+

None

+
+
MIME types
+

text/css+myghty

+
+
+

Subclass of the MyghtyLexer that highlights unlexed data with the CssLexer.

+
+

New in version 0.6.

+
+
+ +
+
+class pygments.lexers.templates.MyghtyHtmlLexer¶
+
+
Short names
+

html+myghty

+
+
Filenames
+

None

+
+
MIME types
+

text/html+myghty

+
+
+

Subclass of the MyghtyLexer that highlights unlexed data with the HtmlLexer.

+
+

New in version 0.6.

+
+
+ +
+
+class pygments.lexers.templates.MyghtyJavascriptLexer¶
+
+
Short names
+

js+myghty, javascript+myghty

+
+
Filenames
+

None

+
+
MIME types
+

application/x-javascript+myghty, text/x-javascript+myghty, text/javascript+mygthy

+
+
+

Subclass of the MyghtyLexer that highlights unlexed data with the JavascriptLexer.

+
+

New in version 0.6.

+
+
+ +
+
+class pygments.lexers.templates.MyghtyLexer¶
+
+
Short names
+

myghty

+
+
Filenames
+

*.myt, autodelegate

+
+
MIME types
+

application/x-myghty

+
+
+

Generic myghty templates lexer. Code that isn’t Myghty markup is yielded as Token.Other.

+
+

New in version 0.6.

+
+
+ +
+
+class pygments.lexers.templates.MyghtyXmlLexer¶
+
+
Short names
+

xml+myghty

+
+
Filenames
+

None

+
+
MIME types
+

application/xml+myghty

+
+
+

Subclass of the MyghtyLexer that highlights unlexed data with the XmlLexer.

+
+

New in version 0.6.

+
+
+ +
+
+class pygments.lexers.templates.RhtmlLexer¶
+
+
Short names
+

rhtml, html+erb, html+ruby

+
+
Filenames
+

*.rhtml

+
+
MIME types
+

text/html+ruby

+
+
+

Subclass of the ERB lexer that highlights the unlexed data with the html lexer.

+

Nested Javascript and CSS is highlighted too.

+
+ +
+
+class pygments.lexers.templates.SmartyLexer¶
+
+
Short names
+

smarty

+
+
Filenames
+

*.tpl

+
+
MIME types
+

application/x-smarty

+
+
+

Generic Smarty template lexer.

+

Just highlights smarty code between the preprocessor directives, other data is left untouched by the lexer.

+
+ +
+
+class pygments.lexers.templates.SspLexer¶
+
+
Short names
+

ssp

+
+
Filenames
+

*.ssp

+
+
MIME types
+

application/x-ssp

+
+
+

Lexer for Scalate Server Pages.

+
+

New in version 1.4.

+
+
+ +
+
+class pygments.lexers.templates.TeaTemplateLexer¶
+
+
Short names
+

tea

+
+
Filenames
+

*.tea

+
+
MIME types
+

text/x-tea

+
+
+

Lexer for Tea Templates.

+
+

New in version 1.5.

+
+
+ +
+
+class pygments.lexers.templates.TwigHtmlLexer¶
+
+
Short names
+

html+twig

+
+
Filenames
+

*.twig

+
+
MIME types
+

text/html+twig

+
+
+

Subclass of the TwigLexer that highlights unlexed data with the HtmlLexer.

+
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.templates.TwigLexer¶
+
+
Short names
+

twig

+
+
Filenames
+

None

+
+
MIME types
+

application/x-twig

+
+
+

Twig template lexer.

+

It just highlights Twig code between the preprocessor directives, other data is left untouched by the lexer.

+
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.templates.VelocityHtmlLexer¶
+
+
Short names
+

html+velocity

+
+
Filenames
+

None

+
+
MIME types
+

text/html+velocity

+
+
+

Subclass of the VelocityLexer that highlights unlexed data with the HtmlLexer.

+
+ +
+
+class pygments.lexers.templates.VelocityLexer¶
+
+
Short names
+

velocity

+
+
Filenames
+

*.vm, *.fhtml

+
+
MIME types
+

None

+
+
+

Generic Velocity template lexer.

+

Just highlights velocity directives and variable references, other data is left untouched by the lexer.

+
+ +
+
+class pygments.lexers.templates.VelocityXmlLexer¶
+
+
Short names
+

xml+velocity

+
+
Filenames
+

None

+
+
MIME types
+

application/xml+velocity

+
+
+

Subclass of the VelocityLexer that highlights unlexed data with the XmlLexer.

+
+ +
+
+class pygments.lexers.templates.XmlDjangoLexer¶
+
+
Short names
+

xml+django, xml+jinja

+
+
Filenames
+

None

+
+
MIME types
+

application/xml+django, application/xml+jinja

+
+
+

Subclass of the DjangoLexer that highlights unlexed data with the XmlLexer.

+
+ +
+
+class pygments.lexers.templates.XmlErbLexer¶
+
+
Short names
+

xml+erb, xml+ruby

+
+
Filenames
+

None

+
+
MIME types
+

application/xml+ruby

+
+
+

Subclass of ErbLexer which highlights data outside preprocessor directives with the XmlLexer.

+
+ +
+
+class pygments.lexers.templates.XmlPhpLexer¶
+
+
Short names
+

xml+php

+
+
Filenames
+

None

+
+
MIME types
+

application/xml+php

+
+
+

Subclass of PhpLexer that highlights unhandled data with the XmlLexer.

+
+ +
+
+class pygments.lexers.templates.XmlSmartyLexer¶
+
+
Short names
+

xml+smarty

+
+
Filenames
+

None

+
+
MIME types
+

application/xml+smarty

+
+
+

Subclass of the SmartyLexer that highlights unlexed data with the XmlLexer.

+
+ +
+
+class pygments.lexers.templates.YamlJinjaLexer¶
+
+
Short names
+

yaml+jinja, salt, sls

+
+
Filenames
+

*.sls

+
+
MIME types
+

text/x-yaml+jinja, text/x-sls

+
+
+

Subclass of the DjangoLexer that highlights unlexed data with the YamlLexer.

+

Commonly used in Saltstack salt states.

+
+

New in version 2.0.

+
+
+ +
+
+

Lexer for Tera Term macro files¶

+
+
+class pygments.lexers.teraterm.TeraTermLexer¶
+
+
Short names
+

ttl, teraterm, teratermmacro

+
+
Filenames
+

*.ttl

+
+
MIME types
+

text/x-teratermmacro

+
+
+

For Tera Term macro source code.

+
+

New in version 2.4.

+
+
+ +
+
+

Lexers for testing languages¶

+
+
+class pygments.lexers.testing.GherkinLexer¶
+
+
Short names
+

cucumber, gherkin

+
+
Filenames
+

*.feature

+
+
MIME types
+

text/x-gherkin

+
+
+

For Gherkin <http://github.com/aslakhellesoy/gherkin/> syntax.

+
+

New in version 1.2.

+
+
+ +
+
+class pygments.lexers.testing.TAPLexer¶
+
+
Short names
+

tap

+
+
Filenames
+

*.tap

+
+
MIME types
+

None

+
+
+

For Test Anything Protocol (TAP) output.

+
+

New in version 2.1.

+
+
+ +
+ +
+

Lexers for various text formats¶

+
+
+class pygments.lexers.textfmts.GettextLexer¶
+
+
Short names
+

pot, po

+
+
Filenames
+

*.pot, *.po

+
+
MIME types
+

application/x-gettext, text/x-gettext, text/gettext

+
+
+

Lexer for Gettext catalog files.

+
+

New in version 0.9.

+
+
+ +
+
+class pygments.lexers.textfmts.HttpLexer¶
+
+
Short names
+

http

+
+
Filenames
+

None

+
+
MIME types
+

None

+
+
+

Lexer for HTTP sessions.

+
+

New in version 1.5.

+
+
+ +
+
+class pygments.lexers.textfmts.IrcLogsLexer¶
+
+
Short names
+

irc

+
+
Filenames
+

*.weechatlog

+
+
MIME types
+

text/x-irclog

+
+
+

Lexer for IRC logs in irssi, xchat or weechat style.

+
+ +
+
+class pygments.lexers.textfmts.NotmuchLexer¶
+
+
Short names
+

notmuch

+
+
Filenames
+

None

+
+
MIME types
+

None

+
+
+

For Notmuch email text format.

+
+

New in version 2.5.

+
+

Additional options accepted:

+
+
body_lexer

If given, highlight the contents of the message body with the specified lexer, else guess it according to the body content (default: None).
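For example (a sketch; forcing bodies to be highlighted as Python is just an illustration), the option can be passed through the usual lookup function:

from pygments.lexers import get_lexer_by_name

# Highlight message bodies as Python instead of guessing the language.
lexer = get_lexer_by_name('notmuch', body_lexer='python')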

+
+
+
+ +
+
+class pygments.lexers.textfmts.TodotxtLexer¶
+
+
Short names
+

todotxt

+
+
Filenames
+

todo.txt, *.todotxt

+
+
MIME types
+

text/x-todo

+
+
+

Lexer for Todo.txt todo list format.

+
+

New in version 2.0.

+
+
+ +
+
+

Lexers for theorem-proving languages¶

+
+
+class pygments.lexers.theorem.CoqLexer¶
+
+
Short names
+

coq

+
+
Filenames
+

*.v

+
+
MIME types
+

text/x-coq

+
+
+

For the Coq theorem prover.

+
+

New in version 1.5.

+
+
+ +
+
+class pygments.lexers.theorem.IsabelleLexer¶
+
+
Short names
+

isabelle

+
+
Filenames
+

*.thy

+
+
MIME types
+

text/x-isabelle

+
+
+

For the Isabelle proof assistant.

+
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.theorem.LeanLexer¶
+
+
Short names
+

lean

+
+
Filenames
+

*.lean

+
+
MIME types
+

text/x-lean

+
+
+

For the Lean theorem prover.

+
+

New in version 2.0.

+
+
+ +
+
+

Lexer for Riverbed’s TrafficScript (RTS) language¶

+
+
+class pygments.lexers.trafficscript.RtsLexer¶
+
+
Short names
+

rts, trafficscript

+
+
Filenames
+

*.rts

+
+
MIME types
+

None

+
+
+

For Riverbed Stingray Traffic Manager.

+
+

New in version 2.1.

+
+
+ +
+
+

Lexers for TypoScript¶

+
+
+class pygments.lexers.typoscript.TypoScriptCssDataLexer¶
+
+
Short names
+

typoscriptcssdata

+
+
Filenames
+

None

+
+
MIME types
+

None

+
+
+

Lexer that highlights markers, constants and registers within css blocks.

+
+

New in version 2.2.

+
+
+ +
+
+class pygments.lexers.typoscript.TypoScriptHtmlDataLexer¶
+
+
Short names
+

typoscripthtmldata

+
+
Filenames
+

None

+
+
MIME types
+

None

+
+
+

Lexer that highlights markers, constants and registers within html tags.

+
+

New in version 2.2.

+
+
+ +
+
+class pygments.lexers.typoscript.TypoScriptLexer¶
+
+
Short names
+

typoscript

+
+
Filenames
+

*.typoscript

+
+
MIME types
+

text/x-typoscript

+
+
+

Lexer for TypoScript code.

+

http://docs.typo3.org/typo3cms/TyposcriptReference/

+
+

New in version 2.2.

+
+
+ +
+
+

Lexers for the Icon and Unicon languages, including ucode VM¶

+
+
+class pygments.lexers.unicon.IconLexer¶
+
+
Short names
+

icon

+
+
Filenames
+

*.icon, *.ICON

+
+
MIME types
+

None

+
+
+

Lexer for Icon.

+
+

New in version 1.6.

+
+
+ +
+
+class pygments.lexers.unicon.UcodeLexer¶
+
+
Short names
+

ucode

+
+
Filenames
+

*.u, *.u1, *.u2

+
+
MIME types
+

None

+
+
+

Lexer for Icon ucode files.

+
+

New in version 2.4.

+
+
+ +
+
+class pygments.lexers.unicon.UniconLexer¶
+
+
Short names
+

unicon

+
+
Filenames
+

*.icn

+
+
MIME types
+

text/unicon

+
+
+

For Unicon source code.

+
+

New in version 2.4.

+
+
+ +
+
+

Lexers for UrbiScript language¶

+
+
+class pygments.lexers.urbi.UrbiscriptLexer¶
+
+
Short names
+

urbiscript

+
+
Filenames
+

*.u

+
+
MIME types
+

application/x-urbiscript

+
+
+

For UrbiScript source code.

+
+

New in version 1.5.

+
+
+ +
+
+

Lexers for Varnish configuration¶

+
+
+class pygments.lexers.varnish.VCLLexer¶
+
+
Short names
+

vcl

+
+
Filenames
+

*.vcl

+
+
MIME types
+

text/x-vclsrc

+
+
+

For Varnish Configuration Language (VCL).

+
+

New in version 2.2.

+
+
+ +
+
+class pygments.lexers.varnish.VCLSnippetLexer¶
+
+
Short names
+

vclsnippets, vclsnippet

+
+
Filenames
+

None

+
+
MIME types
+

text/x-vclsnippet

+
+
+

For Varnish Configuration Language snippets.

+
+

New in version 2.2.

+
+
+ +
+
+

Lexer for Intermediate Verification Languages (IVLs)¶

+
+
+class pygments.lexers.verification.BoogieLexer¶
+
+
Short names
+

boogie

+
+
Filenames
+

*.bpl

+
+
MIME types
+

None

+
+
+

For Boogie source code.

+
+

New in version 2.1.

+
+
+ +
+
+class pygments.lexers.verification.SilverLexer¶
+
+
Short names
+

silver

+
+
Filenames
+

*.sil, *.vpr

+
+
MIME types
+

None

+
+
+

For Silver source code.

+
+

New in version 2.2.

+
+
+ +
+
+

Lexers for misc. web stuff¶

+
+
+class pygments.lexers.webmisc.CirruLexer¶
+
+
Short names
+

cirru

+
+
Filenames
+

*.cirru

+
+
MIME types
+

text/x-cirru

+
+
+

Syntax rules of Cirru can be found at http://cirru.org/

+
• using () for expressions, but restricted to a single line
• using "" for strings, with \ for escaping chars
• using $ as folding operator
• using , as unfolding operator
• using indentations for nested blocks
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.webmisc.DuelLexer¶
+
+
Short names
+

duel, jbst, jsonml+bst

+
+
Filenames
+

*.duel, *.jbst

+
+
MIME types
+

text/x-duel, text/x-jbst

+
+
+

Lexer for Duel Views Engine (formerly JBST) markup with JavaScript code blocks. See http://duelengine.org/ and http://jsonml.org/jbst/.

+
+

New in version 1.4.

+
+
+ +
+
+class pygments.lexers.webmisc.QmlLexer¶
+
+
Short names
+

qml, qbs

+
+
Filenames
+

*.qml, *.qbs

+
+
MIME types
+

application/x-qml, application/x-qt.qbs+qml

+
+
+

For QML files. See http://doc.qt.digia.com/4.7/qdeclarativeintroduction.html.

+
+

New in version 1.6.

+
+
+ +
+
+class pygments.lexers.webmisc.SlimLexer¶
+
+
Short names
+

slim

+
+
Filenames
+

*.slim

+
+
MIME types
+

text/x-slim

+
+
+

For Slim markup.

+
+

New in version 2.0.

+
+
+ +
+
+class pygments.lexers.webmisc.XQueryLexer¶
+
+
Short names
+

xquery, xqy, xq, xql, xqm

+
+
Filenames
+

*.xqy, *.xquery, *.xq, *.xql, *.xqm

+
+
MIME types
+

text/xquery, application/xquery

+
+
+

An XQuery lexer, parsing a stream and outputting the tokens needed to highlight xquery code.

+
+

New in version 1.4.

+
+
+ +
+
+

Lexers for the Whiley language¶

+
+
+class pygments.lexers.whiley.WhileyLexer¶
+
+
Short names
+

whiley

+
+
Filenames
+

*.whiley

+
+
MIME types
+

text/x-whiley

+
+
+

Lexer for the Whiley programming language.

+
+

New in version 2.2.

+
+
+ +
+
+

Lexers for the X10 programming language¶

+
+
+class pygments.lexers.x10.X10Lexer¶
+
+
Short names
+

x10, xten

+
+
Filenames
+

*.x10

+
+
MIME types
+

text/x-x10

+
+
+

For the X10 language.

+
+

New in version 0.1.

+
+
+ +
+
+

Lexers for Xorg configs¶

+
+
+class pygments.lexers.xorg.XorgLexer¶
+
+
Short names
+

xorg.conf

+
+
Filenames
+

xorg.conf

+
+
MIME types
+

None

+
+
+

Lexer for the xorg.conf file.

+
+ +
+
+

Lexers for Zig¶

+
+
+class pygments.lexers.zig.ZigLexer¶
+
+
Short names
+

zig

+
+
Filenames
+

*.zig

+
+
MIME types
+

text/zig

+
+
+

For Zig source code.

+

grammar: https://ziglang.org/documentation/master/#Grammar

+
+ +
+
+

Iterating over all lexers¶

+
+

New in version 0.6.

+
+

To get all lexers (both the builtin and the plugin ones), you can use the get_all_lexers() function from the pygments.lexers module:

+
>>> from pygments.lexers import get_all_lexers
>>> i = get_all_lexers()
>>> next(i)
('Diff', ('diff',), ('*.diff', '*.patch'), ('text/x-diff', 'text/x-patch'))
>>> next(i)
('Delphi', ('delphi', 'objectpascal', 'pas', 'pascal'), ('*.pas',), ('text/x-pascal',))
>>> next(i)
('XML+Ruby', ('xml+erb', 'xml+ruby'), (), ())
+
+
+

As you can see, the return value is an iterator which yields tuples in the form (name, aliases, filetypes, mimetypes).
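A common pattern (a small sketch) is to unpack those tuples directly in a for loop instead of calling next() by hand:

from pygments.lexers import get_all_lexers

# Print every lexer name together with its aliases and filename patterns.
for name, aliases, filetypes, mimetypes in get_all_lexers():
    print('%s: %s (%s)' % (name, ', '.join(aliases), ', '.join(filetypes)))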

+
+
+ + +
+
+
+
+
+ +
+ + + + +
+
+
+
+ +
+

Using Pygments with MoinMoin¶

+

From Pygments 0.7, the source distribution ships a Moin parser plugin that can be used to get Pygments highlighting in Moin wiki pages.

+

To use it, copy the file external/moin-parser.py from the Pygments distribution to the data/plugin/parser subdirectory of your Moin instance. Edit the options at the top of the file (currently ATTACHMENTS and INLINESTYLES) and rename the file to the name that the parser directive should have. For example, if you name the file code.py, you can get a highlighted Python code sample with this Wiki markup:

+
{{{
#!code python
[...]
}}}
+
+
+

where python is the Pygments name of the lexer to use.

+

Additionally, if you set the ATTACHMENTS option to True, Pygments will also be called for all attachments for whose filenames there is no other parser registered.

+

You are responsible for including CSS rules that will map the Pygments CSS classes to colors. You can output a stylesheet file with pygmentize, put it into the htdocs directory of your Moin instance and then include it in the stylesheets configuration option in the Moin config, e.g.:

+
stylesheets = [('screen', '/htdocs/pygments.css')]
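If you prefer to generate that stylesheet from Python instead of with pygmentize, a minimal sketch (the .highlight selector is an assumption; use whatever class your Moin theme wraps code blocks in):

from pygments.formatters import HtmlFormatter

# Write CSS rules for all Pygments classes, scoped to .highlight.
with open('pygments.css', 'w') as f:
    f.write(HtmlFormatter().get_style_defs('.highlight'))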
+
+
+

If you do not want to do that and are willing to accept larger HTML output, you can set the INLINESTYLES option to True.

+
+ + +
+
+
+
+
+ +
+ + + + +
+
+
+
+ +
+

Register Plugins¶

+

If you want to extend Pygments without hacking the sources, but want to use the lexer/formatter/style/filter lookup functions (lexers.get_lexer_by_name et al.), you can use setuptools entrypoints to add new lexers, formatters or styles as if they were in the Pygments core.

+

That means you can use your highlighter modules with the pygmentize script, which relies on the mentioned functions.

+
+

Entrypoints¶

+

Here is a list of setuptools entrypoints that Pygments understands:

+

pygments.lexers

+
+

This entrypoint is used for adding new lexers to the Pygments core. The names of the entrypoint values don’t really matter; Pygments extracts the required metadata from the class definition:

+
[pygments.lexers]
yourlexer = yourmodule:YourLexer
+
+
+

Note that you have to define the name, aliases and filenames attributes so that you can use the highlighter from the command line:

+
class YourLexer(...):
    name = 'Name Of Your Lexer'
    aliases = ['alias']
    filenames = ['*.ext']
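Once the package defining this entrypoint is installed, the normal lookup functions should find the new lexer; a short sketch reusing the alias above:

from pygments.lexers import get_lexer_by_name

# The plugin lexer is looked up exactly like a builtin one.
lexer = get_lexer_by_name('alias')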
+
+
+

pygments.formatters

+
+

You can use this entrypoint to add new formatters to Pygments. The name of an entrypoint item is the name of the formatter. If you prefix the name with a slash it’s used as a filename pattern:

+
[pygments.formatters]
yourformatter = yourmodule:YourFormatter
/.ext = yourmodule:YourFormatter
+
+
+
+

pygments.styles

+
+

To add a new style you can use this entrypoint. The name of the entrypoint is the name of the style:

+
[pygments.styles]
yourstyle = yourmodule:YourStyle
+
+
+
+

pygments.filters

+
+

Use this entrypoint to register a new filter. The name of the entrypoint is the name of the filter:

+
[pygments.filters]
yourfilter = yourmodule:YourFilter
+
+
+
+
+
+

How To Use Entrypoints¶

+

This documentation doesn’t explain how to use those entrypoints because this is covered in the setuptools documentation. That page should cover everything you need to write a plugin.

+
+
+

Extending The Core¶

+

If you have written a Pygments plugin that is open source, please inform us about that. There is a high chance that we’ll add it to the Pygments distribution.

+
+
+ + +
+
+
+
+
+ +
+ + + + +
+
+
+
+ +
+

Introduction and Quickstart¶

+

Welcome to Pygments! This document explains the basic concepts and terms and gives a few examples of how to use the library.

+
+

Architecture¶

+

There are four types of components that work together to highlight a piece of code:

+
• A lexer splits the source into tokens, fragments of the source that have a token type that determines what the text represents semantically (e.g., keyword, string, or comment). There is a lexer for every language or markup format that Pygments supports.
• The token stream can be piped through filters, which usually modify the token types or text fragments, e.g. uppercasing all keywords.
• A formatter then takes the token stream and writes it to an output file, in a format such as HTML, LaTeX or RTF.
• While writing the output, a style determines how to highlight all the different token types. It maps them to attributes like “red and bold”.
+
+

Example¶

+

Here is a small example for highlighting Python code:

+
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

code = 'print "Hello World"'
print(highlight(code, PythonLexer(), HtmlFormatter()))
+
+
+

which prints something like this:

+
<div class="highlight">
<pre><span class="k">print</span> <span class="s">&quot;Hello World&quot;</span></pre>
</div>
+
+
+

As you can see, Pygments uses CSS classes (by default, but you can change that) instead of inline styles in order to avoid outputting redundant style information over and over. A CSS stylesheet that contains all CSS classes possibly used in the output can be produced by:

+
print(HtmlFormatter().get_style_defs('.highlight'))
+
+
+

The argument to get_style_defs() is used as an additional CSS selector: the output may look like this:

+
.highlight .k { color: #AA22FF; font-weight: bold }
.highlight .s { color: #BB4444 }
...
+
+
+
+
+

Options¶

+

The highlight() function supports a fourth argument called outfile; it must be a file object if given. The formatted output will then be written to this file instead of being returned as a string.
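For example (a minimal sketch; hello.html is an arbitrary file name):

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

# Write the formatted output straight to a file instead of a string.
with open('hello.html', 'w') as outfile:
    highlight('print("Hello World")', PythonLexer(), HtmlFormatter(), outfile)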

+

Lexers and formatters both support options. They are given to them as keyword arguments either to the class or to the lookup method:

+
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import HtmlFormatter

lexer = get_lexer_by_name("python", stripall=True)
formatter = HtmlFormatter(linenos=True, cssclass="source")
result = highlight(code, lexer, formatter)
+
+
+

This makes the lexer strip all leading and trailing whitespace from the input (stripall option), lets the formatter output line numbers (linenos option), and sets the wrapping <div>’s class to source (instead of highlight).

+

Important options include:

+
+
encoding (for lexers and formatters)
    Since Pygments uses Unicode strings internally, this determines which encoding will be used to convert to or from byte strings.

style (for formatters)
    The name of the style to use when writing the output.

+
+
+

For an overview of builtin lexers and formatters and their options, visit the lexer and formatters lists.

+

For documentation on filters, see this page.

+
+
+

Lexer and formatter lookup¶

+

If you want to look up a built-in lexer by its alias or a filename, you can use one of the following methods:

+
>>> from pygments.lexers import (get_lexer_by_name,
...     get_lexer_for_filename, get_lexer_for_mimetype)

>>> get_lexer_by_name('python')
<pygments.lexers.PythonLexer>

>>> get_lexer_for_filename('spam.rb')
<pygments.lexers.RubyLexer>

>>> get_lexer_for_mimetype('text/x-perl')
<pygments.lexers.PerlLexer>
+
+
+

All these functions accept keyword arguments; they will be passed to the lexer as options.

+

A similar API is available for formatters: use get_formatter_by_name() and get_formatter_for_filename() from the pygments.formatters module for this purpose.
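For example (a sketch; these lookups return formatter instances, so the exact reprs will differ):

>>> from pygments.formatters import (get_formatter_by_name,
...     get_formatter_for_filename)

>>> get_formatter_by_name('html', linenos=True)
<pygments.formatters.html.HtmlFormatter object at ...>

>>> get_formatter_for_filename('out.tex')
<pygments.formatters.latex.LatexFormatter object at ...>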

+
+
+

Guessing lexers¶

+

If you don’t know the content of the file, or you want to highlight a file whose extension is ambiguous, such as .html (which could contain plain HTML or some template tags), use these functions:

+
>>> from pygments.lexers import guess_lexer, guess_lexer_for_filename

>>> guess_lexer('#!/usr/bin/python\nprint "Hello World!"')
<pygments.lexers.PythonLexer>

>>> guess_lexer_for_filename('test.py', 'print "Hello World!"')
<pygments.lexers.PythonLexer>
+
+
+

guess_lexer() passes the given content to the lexer classes’ analyse_text() method and returns the one for which it returns the highest number.
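A sketch of what such a method can look like in a custom lexer (MyConfigLexer and its [myconfig] heuristic are hypothetical; the token rules are deliberately trivial):

from pygments.lexer import RegexLexer
from pygments.token import Text

class MyConfigLexer(RegexLexer):
    name = 'MyConfig'          # hypothetical lexer, for illustration only
    aliases = ['myconfig']
    filenames = ['*.mycfg']
    tokens = {'root': [(r'.+\n?', Text)]}

    def analyse_text(text):
        # Return a score between 0.0 and 1.0; guess_lexer() picks the
        # lexer whose analyse_text() returns the highest score.
        return 1.0 if text.startswith('[myconfig]') else 0.0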

+

All lexers have two different filename pattern lists: the primary and the secondary one. The get_lexer_for_filename() function only uses the primary list, whose entries are supposed to be unique among all lexers. guess_lexer_for_filename(), however, will first loop through all lexers and look at the primary and secondary filename patterns if the filename matches. If only one lexer matches, it is returned, else the guessing mechanism of guess_lexer() is used with the matching lexers.

+

As usual, keyword arguments to these functions are given to the created lexer as options.

+
+
+

Command line usage¶

+

You can use Pygments from the command line, using the pygmentize script:

+
$ pygmentize test.py
+
+
+

will highlight the Python file test.py using ANSI escape sequences (a.k.a. terminal colors) and print the result to standard output.

+

To output HTML, use the -f option:

+
$ pygmentize -f html -o test.html test.py
+
+
+

to write an HTML-highlighted version of test.py to the file test.html. Note that it will only be a snippet of HTML; if you want a full HTML document, use the “full” option:

+
$ pygmentize -f html -O full -o test.html test.py
+
+
+

This will produce a full HTML document with an included stylesheet.

+

A style can be selected with -O style=<name>.

+

If you need a stylesheet for an existing HTML file using Pygments CSS classes, it can be created with:

+
$ pygmentize -S default -f html > style.css
+
+
+

where default is the style name.

+

More options and tricks can be found in the command line reference.

+
+
+ + +
+
+
+
+
+ +
+ + + + +
+
+
+
+ +
+

Using Pygments in ReST documents¶

+

Many Python people use ReST for documenting their source code, programs, scripts, et cetera. This also means that documentation often includes source code samples or snippets.

+

You can easily enable Pygments support for your ReST texts using a custom directive – this is also how this documentation displays source code.

+

From Pygments 0.9, the directive is shipped in the distribution as external/rst-directive.py. You can copy and adapt this code to your liking.

+
+ + +
+
+
+
+
+ +
+ + + + +
+
+
+
+ +
+

Styles¶

+

Pygments comes with some builtin styles that work for both the HTML and LaTeX formatter.

+

The builtin styles can be looked up with the get_style_by_name function:

+
>>> from pygments.styles import get_style_by_name
>>> get_style_by_name('colorful')
<class 'pygments.styles.colorful.ColorfulStyle'>
+
+
+

You can pass the name of a builtin Style class to a formatter as the style option, in the form of a string:

+
>>> from pygments.styles import get_style_by_name
>>> from pygments.formatters import HtmlFormatter
>>> HtmlFormatter(style='colorful').style
<class 'pygments.styles.colorful.ColorfulStyle'>
+
+
+

Or you can also import your own style (which must be a subclass of pygments.style.Style) and pass it to the formatter:

+
>>> from yourapp.yourmodule import YourStyle
>>> from pygments.formatters import HtmlFormatter
>>> HtmlFormatter(style=YourStyle).style
<class 'yourapp.yourmodule.YourStyle'>
+
+
+
+

Creating Own Styles¶

+

So, how to create a style? All you have to do is to subclass Style and define some styles:

+
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
     Number, Operator, Generic

class YourStyle(Style):
    default_style = ""
    styles = {
        Comment:                'italic #888',
        Keyword:                'bold #005',
        Name:                   '#f00',
        Name.Function:          '#0f0',
        Name.Class:             'bold #0f0',
        String:                 'bg:#eee #111'
    }
+
+
+

That’s it. There are just a few rules. When you define a style for Name, the style automatically also affects Name.Function and so on. If you defined 'bold' and you don’t want boldface for a subtoken, use 'nobold'.

+

(Philosophy: the styles aren’t written in CSS syntax since this way they can be used for a variety of formatters.)

+

default_style is the style inherited by all token types.

+

To make the style usable for Pygments, you must

+
• either register it as a plugin (see the plugin docs)
• or drop it into the styles subpackage of your Pygments distribution, one style class per style, where the file name is the style name and the class name is StylenameClass. For example, if your style should be called "mondrian", name the class MondrianStyle, put it into the file mondrian.py and put this file into the pygments.styles subpackage directory.
+
+

Style Rules¶

+

Here is a small overview of all allowed styles:

+
+
bold

render text as bold

+
+
nobold

don’t render text as bold (to prevent subtokens being highlighted bold)

+
+
italic

render text italic

+
+
noitalic

don’t render text as italic

+
+
underline

render text underlined

+
+
nounderline

don’t render text underlined

+
+
bg:

transparent background

+
+
bg:#000000

background color (black)

+
+
border:

no border

+
+
border:#ffffff

border color (white)

+
+
#ff0000

text color (red)

+
+
noinherit

don’t inherit styles from supertoken

+
+
+

Note that there may not be a space between bg: and the color value since the style definition string is split at whitespace. Also, using named colors is not allowed since the supported color names vary for different formatters.

+

Furthermore, not all lexers might support every style.

+
+
+

Builtin Styles¶

+

Pygments ships some builtin styles which are maintained by the Pygments team.

+

To get a list of known styles you can use this snippet:

+
>>> from pygments.styles import STYLE_MAP
+>>> STYLE_MAP.keys()
+['default', 'emacs', 'friendly', 'colorful']
+
+
+
+
+

Getting a list of available styles¶

+
+

New in version 0.6.

+
+

Because a plugin could have registered additional styles, there is a way to iterate over all styles:

+
>>> from pygments.styles import get_all_styles
>>> styles = list(get_all_styles())
+
+
+
+
+

Terminal Styles¶

+
+

New in version 2.2.

+
+

Custom styles used with the 256-color terminal formatter can also map colors to use the 8 default ANSI colors. To do so, use ansigreen, ansibrightred or any other colors defined in pygments.style.ansicolors. Foreground ANSI colors will be mapped to the corresponding escape codes 30 to 37, thus respecting any custom color mapping and themes provided by many terminal emulators. Light variants are treated as foreground color with an added bold flag. bg:ansi<color> will also be respected, except that light variants will be the same shade as their dark variants.

+

See the following example, where the color of the string “Hello World” is governed by the escape sequence \x1b[34;41;01m (ANSI bright blue, bold, 41 being the red background) instead of an extended foreground & background color.

+
>>> from pygments import highlight
>>> from pygments.style import Style
>>> from pygments.token import Token
>>> from pygments.lexers import Python3Lexer
>>> from pygments.formatters import Terminal256Formatter

>>> class MyStyle(Style):
        styles = {
            Token.String:     'ansibrightblue bg:ansibrightred',
        }

>>> code = 'print("Hello World")'
>>> result = highlight(code, Python3Lexer(), Terminal256Formatter(style=MyStyle))
>>> print(result.encode())
b'\x1b[34;41;01m"\x1b[39;49;00m\x1b[34;41;01mHello World\x1b[39;49;00m\x1b[34;41;01m"\x1b[39;49;00m'
+
+
+

Colors specified using ansi* are converted to a default set of RGB colors when used with formatters other than the terminal-256 formatter.

+

By definition of ANSI, the following colors are considered “light” colors, and will be rendered by most terminals as bold:

+
• “brightblack” (darkgrey), “brightred”, “brightgreen”, “brightyellow”, “brightblue”, “brightmagenta”, “brightcyan”, “white”

The following are considered “dark” colors and will be rendered as non-bold:

+
• “black”, “red”, “green”, “yellow”, “blue”, “magenta”, “cyan”, “gray”

Exact behavior might depend on the terminal emulator you are using, and its settings.

+
+

Changed in version 2.4.

+
+

The definition of the ANSI color names has changed. New names are easier to understand and align to the colors used in other projects.

New names           Pygments up to 2.3
ansiblack           #ansiblack
ansired             #ansidarkred
ansigreen           #ansidarkgreen
ansiyellow          #ansibrown
ansiblue            #ansidarkblue
ansimagenta         #ansipurple
ansicyan            #ansiteal
ansigray            #ansilightgray
ansibrightblack     #ansidarkgray
ansibrightred       #ansired
ansibrightgreen     #ansigreen
ansibrightyellow    #ansiyellow
ansibrightblue      #ansiblue
ansibrightmagenta   #ansifuchsia
ansibrightcyan      #ansiturquoise
ansiwhite           #ansiwhite

+

Old ANSI color names are deprecated but will still work.

+
+
+ + +
+
+
+
+
+ +
+ + + + +
+
+
+
+ +
+

Builtin Tokens¶

+

In the pygments.token module, there is a special object called Token that is used to create token types.

+

You can create a new token type by accessing an attribute of Token:

+
>>> from pygments.token import Token
>>> Token.String
Token.String
>>> Token.String is Token.String
True
+
+
+

Note that tokens are singletons so you can use the is operator for comparing token types.

+

As of Pygments 0.7 you can also use the in operator to perform set tests:

+
>>> from pygments.token import Comment
>>> Comment.Single in Comment
True
>>> Comment in Comment.Multi
False
+
+
+

This can be useful in filters and if you write lexers on your own without using the base lexers.

+

You can also split a token type into a hierarchy, and get the parent of it:

+
>>> String.split()
[Token, Token.Literal, Token.Literal.String]
>>> String.parent
Token.Literal
+
+
+

In principle, you can create an unlimited number of token types but nobody can guarantee that a style would define style rules for a token type. Because of that, Pygments proposes some global token types defined in the pygments.token.STANDARD_TYPES dict.
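That dict maps each standard token type to a short name; these are the CSS class names the HTML formatter emits. For example:

>>> from pygments.token import STANDARD_TYPES, String, Comment
>>> STANDARD_TYPES[String]
's'
>>> STANDARD_TYPES[Comment]
'c'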

+

For some tokens aliases are already defined:

+
>>> from pygments.token import String
>>> String
Token.Literal.String
+
+
+

Inside the pygments.token module the following aliases are defined:

Text           Token.Text             for any type of text data
Whitespace     Token.Text.Whitespace  for specially highlighted whitespace
Error          Token.Error            represents lexer errors
Other          Token.Other            special token for data not matched by a parser (e.g. HTML markup in PHP code)
Keyword        Token.Keyword          any kind of keywords
Name           Token.Name             variable/function names
Literal        Token.Literal          any literals
String         Token.Literal.String   string literals
Number         Token.Literal.Number   number literals
Operator       Token.Operator         operators (+, not…)
Punctuation    Token.Punctuation      punctuation ([, (…)
Comment        Token.Comment          any kind of comments
Generic        Token.Generic          generic tokens (have a look at the explanation below)

+

The Whitespace token type is new in Pygments 0.8. It is used only by the VisibleWhitespaceFilter currently.

+

Normally you just create token types using the already defined aliases. For each of those token aliases, a number of subtypes exists (excluding the special tokens Token.Text, Token.Error and Token.Other).

+

The is_token_subtype() function in the pygments.token module can be used to test if a token type is a subtype of another (such as Name.Tag and Name). (This is the same as Name.Tag in Name. The overloaded in operator was newly introduced in Pygments 0.7; the function still exists for backwards compatibility.)
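For example:

>>> from pygments.token import is_token_subtype, Name
>>> is_token_subtype(Name.Tag, Name)
True
>>> is_token_subtype(Name, Name.Tag)
False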

+

With Pygments 0.7, it’s also possible to convert strings to token types (for example if you want to supply a token from the command line):

+
>>> from pygments.token import String, string_to_tokentype
>>> string_to_tokentype("String")
Token.Literal.String
>>> string_to_tokentype("Token.Literal.String")
Token.Literal.String
>>> string_to_tokentype(String)
Token.Literal.String
+
+
+
+

Keyword Tokens¶

+
+
Keyword

For any kind of keyword (especially if it doesn’t match any of the subtypes of course).

+
+
Keyword.Constant

For keywords that are constants (e.g. None in future Python versions).

+
+
Keyword.Declaration

For keywords used for variable declaration (e.g. var in some programming languages like JavaScript).

+
+
Keyword.Namespace

For keywords used for namespace declarations (e.g. import in Python and Java and package in Java).

+
+
Keyword.Pseudo

For keywords that aren’t really keywords (e.g. None in old Python versions).

+
+
Keyword.Reserved

For reserved keywords.

+
+
Keyword.Type

For builtin types that can’t be used as identifiers (e.g. int, char etc. in C).

+
+
+
+
+

Name Tokens¶

+
+
Name

For any name (variable names, function names, classes).

+
+
Name.Attribute

For all attributes (e.g. in HTML tags).

+
+
Name.Builtin

Builtin names; names that are available in the global namespace.

+
+
Name.Builtin.Pseudo

Builtin names that are implicit (e.g. self in Ruby, this in Java).

+
+
Name.Class

Class names. Because no lexer can know if a name is a class or a function or something else, this token is meant for class declarations.

+
+
Name.Constant

Token type for constants. In some languages you can recognise a token by the way it’s defined (the value after a const keyword for example). In other languages constants are uppercase by definition (Ruby).

+
+
Name.Decorator

Token type for decorators. Decorators are syntactic elements in the Python language. Similar syntax elements exist in C# and Java.

+
+
Name.Entity

Token type for special entities. (e.g. &nbsp; in HTML).

+
+
Name.Exception

Token type for exception names (e.g. RuntimeError in Python). Some languages define exceptions in the function signature (Java). You can highlight the name of that exception using this token then.

+
+
Name.Function

Token type for function names.

+
+
Name.Function.Magic

same as Name.Function but for special function names that have an implicit use in a language (e.g. __init__ method in Python).

+
+
Name.Label

Token type for label names (e.g. in languages that support goto).

+
+
Name.Namespace

Token type for namespaces (e.g. import paths in Java/Python), names following the module/namespace keyword in other languages.

+
+
Name.Other

Other names. Normally unused.

+
+
Name.Tag

Tag names (in HTML/XML markup or configuration files).

+
+
Name.Variable

Token type for variables. Some languages have prefixes for variable names (PHP, Ruby, Perl). You can highlight them using this token.

+
+
Name.Variable.Class

same as Name.Variable but for class variables (also static variables).

+
+
Name.Variable.Global

same as Name.Variable but for global variables (used in Ruby, for example).

+
+
Name.Variable.Instance

same as Name.Variable but for instance variables.

+
+
Name.Variable.Magic

same as Name.Variable but for special variable names that have an implicit use in a language (e.g. __doc__ in Python).

+
+
+
+
+

Literals¶

+
+
Literal

For any literal (if not further defined).

+
+
Literal.Date

for date literals (e.g. 42d in Boo).

+
+
String

For any string literal.

+
+
String.Affix

Token type for affixes that further specify the type of the string they’re attached to (e.g. the prefixes r and u8 in r"foo" and u8"foo").

+
+
String.Backtick

Token type for strings enclosed in backticks.

+
+
String.Char

Token type for single characters (e.g. Java, C).

+
+
String.Delimiter

Token type for delimiting identifiers in “heredoc”, raw and other similar strings (e.g. the word END in Perl code print <<'END';).

+
+
String.Doc

Token type for documentation strings (for example Python).

+
+
String.Double

Double quoted strings.

+
+
String.Escape

Token type for escape sequences in strings.

+
+
String.Heredoc

Token type for “heredoc” strings (e.g. in Ruby or Perl).

+
+
String.Interpol

Token type for interpolated parts in strings (e.g. #{foo} in Ruby).

+
+
String.Other

Token type for any other strings (for example %q{foo} string constructs in Ruby).

+
+
String.Regex

Token type for regular expression literals (e.g. /foo/ in JavaScript).

+
+
String.Single

Token type for single quoted strings.

+
+
String.Symbol

Token type for symbols (e.g. :foo in LISP or Ruby).

+
+
Number

Token type for any number literal.

+
+
Number.Bin

Token type for binary literals (e.g. 0b101010).

+
+
Number.Float

Token type for float literals (e.g. 42.0).

+
+
Number.Hex

Token type for hexadecimal number literals (e.g. 0xdeadbeef).

+
+
Number.Integer

Token type for integer literals (e.g. 42).

+
+
Number.Integer.Long

Token type for long integer literals (e.g. 42L in Python).

+
+
Number.Oct

Token type for octal literals.

+
+
+
+
+

Operators¶

+
+
Operator

For any punctuation operator (e.g. +, -).

+
+
Operator.Word

For any operator that is a word (e.g. not).

+
+
+
+
+

Punctuation¶

+
+

New in version 0.7.

+
+
+
Punctuation

For any punctuation which is not an operator (e.g. [, (…)

+
+
+
+
+

Comments¶

+
+
Comment

Token type for any comment.

+
+
Comment.Hashbang

Token type for hashbang comments (i.e. first lines of files that start with #!).

+
+
+
+
Comment.Multiline

Token type for multiline comments.

+
+
Comment.Preproc

Token type for preprocessor comments (also <?php/<% constructs).

+
+
Comment.Single

Token type for comments that end at the end of a line (e.g. # foo).

+
+
Comment.Special

Special data in comments. For example code tags, author and license information, etc.

+
+
+
+
+

Generic Tokens¶

+

Generic tokens are for special lexers like the DiffLexer that don’t really highlight a programming language but a patch file.

+
+
Generic

A generic, unstyled token. Normally you don’t use this token type.

+
+
Generic.Deleted

Marks the token value as deleted.

+
+
Generic.Emph

Marks the token value as emphasized.

+
+
Generic.Error

Marks the token value as an error message.

+
+
Generic.Heading

Marks the token value as headline.

+
+
Generic.Inserted

Marks the token value as inserted.

+
+
Generic.Output

Marks the token value as program output (e.g. for python cli lexer).

+
+
Generic.Prompt

Marks the token value as command prompt (e.g. bash lexer).

+
+
Generic.Strong

Marks the token value as bold (e.g. for rst lexer).

+
+
Generic.Subheading

Marks the token value as subheadline.

+
+
Generic.Traceback

Marks the token value as a part of an error traceback.

+
+
+
+
+ + +
+
+
+
+
+ +
+ + + + +
+
+
+
+ +
+

Unicode and Encodings¶

+

Since Pygments 0.6, all lexers use unicode strings internally. Because of that you might encounter the occasional UnicodeDecodeError if you pass strings with the wrong encoding.

+

By default all lexers have their input encoding set to guess. This means that the following encodings are tried:

+
• UTF-8 (including BOM handling)
• The locale encoding (i.e. the result of locale.getpreferredencoding())
• As a last resort, latin1

If you pass a lexer a byte string object (not unicode), it tries to decode the data using this encoding.

+

You can override the encoding using the encoding or inencoding lexer options. If you have the chardet library installed and set the encoding to chardet, it will analyse the text and use the encoding it thinks is the right one automatically:

+
from pygments.lexers import PythonLexer
lexer = PythonLexer(encoding='chardet')
+
+
+

The best way is to pass Pygments unicode objects. In that case you can’t get unexpected output.

+

The formatters now send Unicode objects to the stream if you don’t set the output encoding. You can do so by passing the formatters an encoding option:

+
from pygments.formatters import HtmlFormatter
f = HtmlFormatter(encoding='utf-8')
+
+
+

You will have to set this option if you have non-ASCII characters in the source and the output stream does not accept Unicode written to it! This is the case for all regular files and for terminals.

+

Note: The Terminal formatter tries to be smart: if its output stream has an encoding attribute, and you haven’t set the option, it will encode any Unicode string with this encoding before writing it. This is the case for sys.stdout, for example. The other formatters don’t have that behavior.

+

Another note: If you call Pygments via the command line (pygmentize), encoding is handled differently; see the command line docs.

+
+

New in version 0.7: The formatters now also accept an outencoding option which will override the encoding option if given. This makes it possible to use a single options dict with lexers and formatters, and still have different input and output encodings.
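A minimal sketch of that pattern (the latin1 input encoding is just an example):

from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

# One options dict shared by lexer and formatter: the lexer decodes
# latin1 input, while outencoding makes the formatter emit utf-8.
options = {'encoding': 'latin1', 'outencoding': 'utf-8'}
lexer = PythonLexer(**options)
formatter = HtmlFormatter(**options)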

+
+
+ + +
+
+
+
+
+ +
+ + + + +
+
+
+
+ +
+

Download and installation¶

+

The current release is version 2.5.1.

+
+

Packaged versions¶

+

You can download it from the Python Package Index. For installation of packages from PyPI, we recommend Pip, which works on all major platforms.

Under Linux, most distributions include a package for Pygments, usually called
pygments or python-pygments. You can install it with the package manager as
usual.

Development sources

We're using the Git version control system. You can get the development source
using this command:

git clone https://github.com/pygments/pygments

Development takes place at GitHub.

The latest changes in the development source code are listed in the changelog.

\ No newline at end of file
diff --git a/doc/_build/html/faq.html b/doc/_build/html/faq.html
new file mode 100644
index 0000000..7f164ab
--- /dev/null
+++ b/doc/_build/html/faq.html
@@ -0,0 +1,255 @@

Pygments FAQ

What is Pygments?

Pygments is a syntax highlighting engine written in Python. That means it will
take source code (or other markup) in a supported language and output a
processed version (in different formats) containing syntax highlighting
markup.

Its features include:

• a wide range of common languages and markup formats is supported
• new languages and formats are added easily
• a number of output formats is available, including:
    • HTML
    • ANSI sequences (console output)
    • LaTeX
    • RTF
• it is usable as a command-line tool and as a library
• parsing and formatting is fast

Pygments is licensed under the BSD license.

Where does the name Pygments come from?

Py of course stands for Python, while pigments are used for coloring paint,
and in this case, source code!

What are the system requirements?

Pygments only needs a standard Python install: version 2.7 or higher for
Python 2, or version 3.5 or higher for Python 3. No additional libraries are
needed.

How can I use Pygments?

Pygments is usable as a command-line tool as well as a library.

From the command line, usage looks like this (assuming the pygmentize script
is properly installed):

pygmentize -f html /path/to/file.py

This will print an HTML-highlighted version of /path/to/file.py to standard
output.

For complete help, please run pygmentize -h.

Usage as a library is thoroughly demonstrated in the Documentation section.
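As a quick taste, the core library entry point is highlight(), which takes
the code, a lexer and a formatter:

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

code = 'print("Hello, world!")'
# Lex the code and render the resulting token stream as HTML.
print(highlight(code, PythonLexer(), HtmlFormatter()))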

How do I make a new style?

Please see the documentation on styles.
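In short, a style is a subclass of pygments.style.Style whose styles dict
maps token types to style strings; a minimal sketch (the MyStyle name and the
colors are arbitrary examples):

from pygments.style import Style
from pygments.token import Comment, Keyword, Name, String

class MyStyle(Style):
    default_style = ''        # applied to otherwise unstyled tokens
    styles = {
        Comment: 'italic #888888',
        Keyword: 'bold #005588',
        Name:    '#ff0000',
        String:  'bg:#eeeeee #111111',
    }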

How can I report a bug or suggest a feature?

Please report bugs and feature wishes in the tracker at GitHub.

You can also e-mail the authors; see the contact details.

I want support for this language!

Instead of waiting for others to include language support, why not write it
yourself? All you have to know is outlined in the docs.

Can I use Pygments for programming language processing?

The Pygments lexing machinery is quite powerful and can be used to build
lexers for basically all languages. However, parsing them is not possible,
though some lexers go some steps in this direction in order to, e.g.,
highlight function names differently.
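In other words, what you get from a lexer is a flat token stream, not a
syntax tree. A minimal sketch:

from pygments.lexers import PythonLexer

# get_tokens() yields (token type, text) pairs in source order.
for ttype, value in PythonLexer().get_tokens('x = 42\n'):
    print(ttype, repr(value))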

Also, error reporting is not within the scope of Pygments. It focuses on
correctly highlighting syntactically valid documents, not on finding and
compensating for errors.

Who uses Pygments?

This is an (incomplete) list of projects and sites known to use the Pygments
highlighter:

• Wikipedia
• BitBucket, a Mercurial and Git hosting site
• The Sphinx documentation builder, for embedded source examples
• rst2pdf, a reStructuredText to PDF converter
• Codecov, a code coverage CI service
• Trac, the universal project management tool
• AsciiDoc, a text-based documentation generator
• ActiveState Code, the Python Cookbook successor
• ViewVC, a web-based version control repository browser
• BzrFruit, a Bazaar branch viewer
• QBzr, a cross-platform Qt-based GUI front end for Bazaar
• Review Board, a collaborative code reviewing tool
• Diamanda, a Django powered wiki system with support for Pygments
• Progopedia (English), an encyclopedia of programming languages
• Bruce, a reStructuredText presentation tool
• PIDA, a universal IDE written in Python
• BPython, a curses-based intelligent Python shell
• PuDB, a console Python debugger
• XWiki, a wiki-based development framework in Java, using Jython
• roux, a script for running R scripts and creating beautiful output
  including graphs
• hurl, a web service for making HTTP requests
• wxHTMLPygmentizer, a GUI utility used to make code-colorization easier
• Postmarkup, a BBCode to XHTML generator
• WpPygments and WPygments, highlighter plugins for WordPress
• Siafoo, a tool for sharing and storing useful code and programming
  experience
• D source, a community for the D programming language
• dpaste.com, another Django pastebin
• Django snippets, a pastebin for Django code
• Fayaa, a Chinese pastebin
• Incollo.com, a free collaborative debugging tool
• PasteBox, a pastebin focused on privacy
• hilite.me, a site to highlight code snippets
• patx.me, a pastebin
• Fluidic, an experiment in integrating shells with a GUI
• pygments.rb, a pygments wrapper for Ruby
• Clygments, a pygments wrapper for Clojure
• PHPygments, a pygments wrapper for PHP
• Spyder, the Scientific Python Development Environment, which uses Pygments
  for the multi-language syntax highlighting in its editor

If you have a project or web site using Pygments, drop me a line and I'll add
a link here.

\ No newline at end of file
diff --git a/doc/_build/html/genindex.html b/doc/_build/html/genindex.html
new file mode 100644
index 0000000..9c1ed58
--- /dev/null
+++ b/doc/_build/html/genindex.html
@@ -0,0 +1,1682 @@
\ No newline at end of file
diff --git a/doc/_build/html/index.html b/doc/_build/html/index.html
new file mode 100644
index 0000000..80f2259
--- /dev/null
+++ b/doc/_build/html/index.html
@@ -0,0 +1,166 @@

Welcome!

This is the home of Pygments. It is a generic syntax highlighter suitable for
use in code hosting, forums, wikis or other applications that need to prettify
source code. Highlights are:
• a wide range of over 300 languages and other text formats is supported
• special attention is paid to details that increase highlighting quality
• support for new languages and formats is added easily; most languages use a
  simple regex-based lexing mechanism
• a number of output formats is available, among them HTML, RTF, LaTeX and
  ANSI sequences
• it is usable as a command-line tool and as a library
• … and it highlights even Perl 6!

Read more in the FAQ list or the documentation, or download the latest
release.

Contribute

Like every open-source project, we are always looking for volunteers to help
us with programming. Python knowledge is required, but don't fear: Python is
a very clear and easy-to-learn language.

Development takes place on GitHub.

If you found a bug, just open a ticket in the GitHub tracker. Be sure to log
in to be notified when the issue is fixed; development is not fast-paced, as
the library is quite stable. You can also send an e-mail to the developers,
see below.

The authors

Pygments is maintained by Georg Brandl, e-mail address georg@python.org,
and Matthäus Chajdas.

Many lexers and fixes have been contributed by Armin Ronacher, the rest of
the Pocoo team and Tim Hatch.
\ No newline at end of file
diff --git a/doc/_build/html/languages.html b/doc/_build/html/languages.html
new file mode 100644
index 0000000..279da8c
--- /dev/null
+++ b/doc/_build/html/languages.html
@@ -0,0 +1,301 @@

Supported languages

Pygments supports an ever-growing range of languages. Watch this space…

Programming languages
• ActionScript
• Ada
• ANTLR
• AppleScript
• Assembly (various)
• Asymptote
• Augeas
• Awk
• BBC Basic
• Befunge
• Boa
• Boo
• BrainFuck
• C, C++
• C#
• Charm++ CI
• Clojure
• CoffeeScript
• ColdFusion
• Common Lisp
• Coq
• Cryptol (incl. Literate Cryptol)
• Crystal
• Cython
• D
• Dart
• DCPU-16
• Delphi
• Dylan
• Elm
• Email
• Erlang
• Ezhil, a Tamil programming language
• Factor
• Fancy
• Fennel
• FloScript
• Fortran
• FreeFEM++
• F#
• GAP
• Gherkin (Cucumber)
• GL shaders
• Groovy
• Haskell (incl. Literate Haskell)
• HLSL
• HSpec
• IDL
• Io
• Java
• JavaScript
• Lasso
• LLVM
• Logtalk
• Lua
• Matlab
• MiniD
• Modelica
• Modula-2
• MuPad
• Nemerle
• Nimrod
• Notmuch
• Objective-C
• Objective-J
• Octave
• OCaml
• PHP
• Perl 5 and Perl 6
• Pony
• PovRay
• PostScript
• PowerShell
• Prolog
• Python 2.x and 3.x (incl. console sessions and tracebacks)
• REBOL
• Red
• Redcode
• Ruby (incl. irb sessions)
• Rust
• S, S-Plus, R
• Scala
• Scdoc
• Scheme
• Scilab
• SGF
• Slash
• Slurm
• Smalltalk
• SNOBOL
• Solidity
• Tcl
• Tera Term language
• TOML
• Vala
• Verilog
• VHDL
• Visual Basic.NET
• Visual FoxPro
• XQuery
• Zeek
• Zephir
• Zig

Template languages

• Cheetah templates
• Django / Jinja templates
• ERB (Ruby templating)
• Genshi (the Trac template language)
• JSP (Java Server Pages)
• Myghty (the HTML::Mason based framework)
• Mako (the Myghty successor)
• Smarty templates (PHP templating)
• Tea

Other markup

• Apache config files
• Bash shell scripts
• BBCode
• CMake
• CSS
• Debian control files
• Diff files
• DTD
• Gettext catalogs
• Gnuplot script
• Groff markup
• HTML
• HTTP sessions
• INI-style config files
• IRC logs (irssi style)
• Lighttpd config files
• Makefiles
• MoinMoin/Trac Wiki markup
• MySQL
• Nginx config files
• POV-Ray scenes
• Ragel
• Redcode
• ReST
• Robot Framework
• RPM spec files
• SQL, also MySQL, SQLite
• Squid configuration
• TeX
• tcsh
• Vim Script
• Windows batch files
• XML
• XSLT
• YAML

… that’s all?¶

+

Well, why not write your own? Contributing to Pygments is easy and fun. Take a +look at the docs on lexer development. Pull +requests are welcome on GitHub <https://github.com/pygments/pygments>.
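To give a flavor, a regex-based lexer is just a RegexLexer subclass with a
table of rules; the Example language below is purely hypothetical:

from pygments.lexer import RegexLexer
from pygments.token import Comment, Keyword, Name, Text

class ExampleLexer(RegexLexer):
    name = 'Example'          # hypothetical language, for illustration only
    aliases = ['example']
    filenames = ['*.example']

    tokens = {
        'root': [
            (r'#.*$', Comment.Single),        # line comments
            (r'\b(if|else|while)\b', Keyword),
            (r'[A-Za-z_]\w*', Name),
            (r'\s+', Text),
        ],
    }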

Note: the languages listed here are supported in the development version. The
latest release may lack a few of them.

\ No newline at end of file
diff --git a/doc/_build/html/objects.inv b/doc/_build/html/objects.inv
new file mode 100644
index 0000000..63b10ba
Binary files /dev/null and b/doc/_build/html/objects.inv differ
diff --git a/doc/_build/html/py-modindex.html b/doc/_build/html/py-modindex.html
new file mode 100644
index 0000000..60a287e
--- /dev/null
+++ b/doc/_build/html/py-modindex.html
@@ -0,0 +1,820 @@

Python Module Index

pygments
    pygments.formatter
    pygments.formatters
    pygments.lexer
    pygments.lexers
    pygments.lexers.actionscript
    pygments.lexers.algebra
    pygments.lexers.ambient
    pygments.lexers.ampl
    pygments.lexers.apl
    pygments.lexers.archetype
    pygments.lexers.asm
    pygments.lexers.automation
    pygments.lexers.basic
    pygments.lexers.bibtex
    pygments.lexers.boa
    pygments.lexers.business
    pygments.lexers.c_cpp
    pygments.lexers.c_like
    pygments.lexers.capnproto
    pygments.lexers.chapel
    pygments.lexers.clean
    pygments.lexers.configs
    pygments.lexers.console
    pygments.lexers.crystal
    pygments.lexers.csound
    pygments.lexers.css
    pygments.lexers.d
    pygments.lexers.dalvik
    pygments.lexers.data
    pygments.lexers.diff
    pygments.lexers.dotnet
    pygments.lexers.dsls
    pygments.lexers.dylan
    pygments.lexers.ecl
    pygments.lexers.eiffel
    pygments.lexers.elm
    pygments.lexers.email
    pygments.lexers.erlang
    pygments.lexers.esoteric
    pygments.lexers.ezhil
    pygments.lexers.factor
    pygments.lexers.fantom
    pygments.lexers.felix
    pygments.lexers.floscript
    pygments.lexers.forth
    pygments.lexers.fortran
    pygments.lexers.foxpro
    pygments.lexers.freefem
    pygments.lexers.go
    pygments.lexers.grammar_notation
    pygments.lexers.graph
    pygments.lexers.graphics
    pygments.lexers.haskell
    pygments.lexers.haxe
    pygments.lexers.hdl
    pygments.lexers.hexdump
    pygments.lexers.html
    pygments.lexers.idl
    pygments.lexers.igor
    pygments.lexers.inferno
    pygments.lexers.installers
    pygments.lexers.int_fiction
    pygments.lexers.iolang
    pygments.lexers.j
    pygments.lexers.javascript
    pygments.lexers.julia
    pygments.lexers.jvm
    pygments.lexers.lisp
    pygments.lexers.make
    pygments.lexers.markup
    pygments.lexers.matlab
    pygments.lexers.mime
    pygments.lexers.ml
    pygments.lexers.modeling
    pygments.lexers.modula2
    pygments.lexers.monte
    pygments.lexers.ncl
    pygments.lexers.nimrod
    pygments.lexers.nit
    pygments.lexers.nix
    pygments.lexers.oberon
    pygments.lexers.objective
    pygments.lexers.ooc
    pygments.lexers.parasail
    pygments.lexers.parsers
    pygments.lexers.pascal
    pygments.lexers.pawn
    pygments.lexers.perl
    pygments.lexers.php
    pygments.lexers.pony
    pygments.lexers.praat
    pygments.lexers.prolog
    pygments.lexers.python
    pygments.lexers.qvt
    pygments.lexers.r
    pygments.lexers.rdf
    pygments.lexers.rebol
    pygments.lexers.resource
    pygments.lexers.rnc
    pygments.lexers.roboconf
    pygments.lexers.robotframework
    pygments.lexers.ruby
    pygments.lexers.rust
    pygments.lexers.sas
    pygments.lexers.scdoc
    pygments.lexers.scripting
    pygments.lexers.sgf
    pygments.lexers.shell
    pygments.lexers.slash
    pygments.lexers.smalltalk
    pygments.lexers.smv
    pygments.lexers.snobol
    pygments.lexers.solidity
    pygments.lexers.special
    pygments.lexers.sql
    pygments.lexers.stata
    pygments.lexers.supercollider
    pygments.lexers.tcl
    pygments.lexers.templates
    pygments.lexers.teraterm
    pygments.lexers.testing
    pygments.lexers.textedit
    pygments.lexers.textfmts
    pygments.lexers.theorem
    pygments.lexers.trafficscript
    pygments.lexers.typoscript
    pygments.lexers.unicon
    pygments.lexers.urbi
    pygments.lexers.varnish
    pygments.lexers.verification
    pygments.lexers.webmisc
    pygments.lexers.whiley
    pygments.lexers.x10
    pygments.lexers.xorg
    pygments.lexers.zig
    pygments.styles
    pygments.token
    pygments.util
\ No newline at end of file
diff --git a/doc/_build/html/search.html b/doc/_build/html/search.html
new file mode 100644
index 0000000..ab4abe5
--- /dev/null
+++ b/doc/_build/html/search.html
@@ -0,0 +1,126 @@
\ No newline at end of file
diff --git a/doc/_build/html/searchindex.js b/doc/_build/html/searchindex.js
new file mode 100644
index 0000000..bba49a9
--- /dev/null
+++ b/doc/_build/html/searchindex.js
@@ -0,0 +1 @@
llexer:[11,12],htmlphplexer:[11,12],htmlsmartylex:12,http:[1,2,8,10,12,20,21,23],httpd:12,httplexer:[2,12],huge:11,human:[0,12],hundr:11,hurl:21,hxml:12,hxmllexer:12,hxsl:12,hxx:12,hyb:12,hybri:[1,2,12],hybrislex:12,hylang:12,hylex:12,hyperlink:7,hypothet:11,hypotheticlex:11,i18n:2,i6t:12,i7x:12,iOS:2,ian:[1,2],icl:12,icn:12,iconlex:12,icu:12,icw:12,idc:12,idea:[2,10],ideal:12,identifi:[0,2,12,18],idl4:12,idl:[1,2,23],idllex:12,idr:12,idri:[1,2,12],idrislex:12,iec:12,ieee:12,ietf:12,iex:12,ignor:[6,7,12],ignorecas:11,igor:[1,2],igorexchang:12,igorlex:12,igorpro:12,ijs:12,imag:[1,2,7,12],image_format:7,image_pad:7,imageformatt:[2,7],img:7,immedi:0,implement:[0,2,6,7,12],implicit:18,imposs:11,improv:[1,2,5,7],inc:[1,12],incl:23,includ:[0,2,3,7,11,13,15,16,19,20,21],inclus:11,incollo:21,incompat:2,incomplet:[2,21],incorrect:2,incorrectli:12,increas:[7,11,22],incred:11,indent:[2,5,7,12],index:[0,7,11,20],indexerror:11,indic:[2,7,11],individu:7,industri:12,inencod:[2,3,19],inf:12,infer:3,inferno:1,infinit:2,influenc:11,info:12,inform6:12,inform6lex:12,inform6templatelex:12,inform7:12,inform7lex:12,inform:[1,2,3,7,11,12,14,15,18],ing:2,inherit:[1,2,11,17],ini:[2,11,12,23],inilex:[11,12],initi:[2,5,6,11],initialis:11,inkpot:2,inlin:[2,6,7,15],inlinestyl:13,ino:12,inozemtsev:1,input:[0,2,3,7,11,12,15,19],inputenc:7,insensit:[0,2],insert:[2,11,18],insid:[7,11,18],instal:[2,3,7,8,11,19,21],instanc:[0,2,4,5,10,11,12,13,17,18],instanti:[0,4,6,11],instead:[2,6,7,11,12,15,17,21],instruct:12,int_fict:12,integ:[0,12,18],integr:[2,7,12,21],intel:[2,12],intellig:21,interact:2,interchang:12,interfac:[0,2,8,12],intern:[4,11,15,19],internet:7,interpol:[2,18],interpret:[0,10],intr:12,introduc:[2,7,11,18],introduct:8,invalid:2,invari:11,invoc:7,iok:[1,2,12],iokelex:12,iokesrc:12,iolang:12,iolex:12,iosrc:12,ipf:12,ipython:2,irb:[12,23],irc:[2,7,12,23],ircformatt:7,irclog:12,irclogslex:12,irssi:[12,23],is_token_subtyp:18,isabel:[1,2,12],isabellelex:12,isn:[2,5,6,11,12],iso:12,issu:[2,22],ital:[6,7,12,17],italic:7,item:[6,11,14],iter:[0,2,5,6,7,11,17],itoh:1,its:[0,2,11,12,15,17,19,21],itself:11,ivan:1,jackson:1,jade:[1,12],jag:[1,2,12],jagslex:12,jame:1,jan:2,jann:1,januari:12,jar:10,jarrett:[1,2],jasmin:[1,2,12],jasminlex:12,jasminxt:12,java:[2,8,12,18,21,23],javalex:12,javascript:[2,7,11,18,23],javascriptdjangolex:12,javascripterblex:12,javascriptgenshilex:12,javascriptlex:[2,11,12],javascriptphplex:12,javascriptsmartylex:12,jbst:[1,2,12],jcl:[1,2,12],jcllexer:12,jeffrei:1,jeremi:[1,12],jerith:12,jerom:1,jesper:1,jinja2:2,jinja:[12,23],jlcon:12,jlexer:12,job:12,jochen:1,joe:1,joerg:1,john:1,join:[2,6],jon:1,jona:1,jordi:1,jpeg:7,jpg:[2,7],jpgimageformatt:7,jproperti:12,jsgf:[1,2,12],jsgflexer:12,jsm:12,json:[1,2,12],jsonbareobjectlex:12,jsonld:12,jsonldlex:12,jsonlex:12,jsonml:12,jsp:[12,23],jspeech:12,jsplexer:12,julia:[1,2],juliaconsolelex:12,julialex:12,jun:2,just:[2,6,7,11,12,17,18,22],justin:1,juttl:12,juttlelex:12,jython:[2,10,21],kaarsemak:[1,2],kabak:1,kal:[1,2,12],kallex:12,kalnitski:1,kashif:1,kconfig:[1,2,12],kconfiglex:12,keep:[2,7],kei:[0,6,11,12,17],ken:[1,12],kept:11,kernel:12,keyboardinterrupt:2,keyword:[0,2,4,5,6,12,15,17],keywordcas:[3,5],keywordcasefilt:5,kid:12,kif:12,kind:[2,18],kiril:[1,2],kirk:[1,2],kit:1,kki:12,klein:1,knibb:1,know:[2,15,18,21],knowledg:22,known:[0,3,5,11,17,21],koka:[2,12],kokalex:12,koltsov:1,konrad:1,koprowski:1,korean:12,kotlin:[1,2,12],kotlinlex:12,kowarsch:1,krekel:1,kriegisch:1,kristian:1,krzysiek:[1,2],kschutt:12,ksh:12,kubica:[1,2],kumar:1,kupperschmidt:1,kurt:1,k
urzbach:1,label:[2,18],lack:23,lagda:12,lambda:2,lang_builtin:11,languag:[0,2,11,15,18,22],language_lex:11,larger:[7,13],larim:1,lasso:[1,2,12,23],lassocsslex:12,lassohtmllex:12,lassojavascriptlex:12,lassolex:12,lassoscript:12,lassoxmllex:12,lassu:1,last:[3,6,11,12,19],lasttyp:6,lastval:6,later:[6,7,11,12],latest:[20,22,23],latex:[1,2,7,12,15,17,21,22],latexformatt:[2,6,7],latin1:[2,3,19],latin:12,latter:[2,6,7],laurent:1,layman:1,layout:12,lazi:12,lcry:12,lcryptol:12,lead:[2,11,12,15],leaf:1,lean:[2,12],leanlex:12,learn:22,ledru:1,lee:1,left:[6,12],length:[7,11,12],lenient:12,less:[2,4,12],lesscss:1,lesscsslex:12,lessfilt:2,let:[6,12,15],letter:[5,7,12],level:12,lex:[0,2,11,12,21,22],lexem:12,lexer:[1,2,4,5,6,7,8,10,13,14,17,18,19,21,22,23],lexercontext:11,lexernam:0,lgt:12,lhaskel:12,lhs:12,librari:[2,6,7,9,12,15,19,21,22],licens:[2,18,21],lid:[2,12],lidr:12,lidri:12,life:12,light:[6,7,17],lightbg:7,lighti:12,lighttpd:[2,12,23],lighttpdconflex:12,like:[0,2,3,6,7,9,10,11,15,16,18,21,22],limbo:[1,2,12],limbolex:12,linden:12,line:[0,2,5,7,8,11,12,14,18,19,21,22],line_numb:7,line_number_bg:7,line_number_bold:7,line_number_char:7,line_number_fg:7,line_number_ital:7,line_number_pad:7,line_number_separ:[2,7],line_number_start:[2,7],line_number_step:7,line_pad:7,lineanchor:[2,7],lineno:[2,3,7,15],linenospeci:7,linenostart:7,linenostep:7,linenumb:7,linesepar:7,linespan:[2,7],linewis:12,linh:1,link:[2,7,12,21],linux:[12,20],liquid:[2,12],liquidlex:12,lisp:[1,2,12,18,23],list:[0,2,3,5,7,8,12,14,15,20,21,22,23],listen:2,liter:[1,2,6,7,11,12,23],literateagdalex:12,literatecryptollex:12,literatehaskelllex:12,literateidrislex:12,litstyl:12,littl:[2,12],live:12,livescript:[1,2,12],livescriptlex:12,llvm:[2,12,23],llvmlexer:12,load:[0,1,2,3,11,12],load_formatter_from_fil:[0,2],load_lexer_from_fil:[0,2,11],local:[2,3,11,19],locat:2,log:[1,2,12,22,23],logo:[1,2,12],logoslex:12,logtalk:[1,2,12,23],logtalklex:12,longer:[2,11],longest:11,longnam:0,look:[0,3,5,7,10,11,12,15,17,18,21,22,23],lookup:[0,11,14],loop:[2,15],lorentz:1,lot:[2,10,11],loui:1,lovelac:[1,2],lower:5,lowercas:[5,12],lsl:[2,12],lsllexer:12,lsp:12,lua:[1,2,12,23],lualex:[11,12],lubomir:1,luca:1,luka:1,m2iso:12,m2pim:12,m2r10:12,mabei:1,mac:[2,7,12],macarthur:1,machineri:21,macro:[1,6,7],made:[2,11],magenta:17,magic:[2,18],mai:[0,2,3,5,7,11,12,15,17,23],mail:[21,22],main:[3,7,11,12],mainfram:12,mainli:12,maintain:[1,17,22],major:[1,2,20],mak:12,make:[2,4,7,10,11,12,15,17,19],makefil:[2,23],makefilelex:12,mako:[1,2,12,23],makocsslex:12,makohtmllex:12,makojavascriptlex:12,makolex:12,makoxmllex:12,malform:12,malzeug:2,manag:[12,20,21],mandatori:0,mandel:1,mani:[1,2,7,11,16,17,22],manpag:[2,12],manual:12,mao:12,map:[2,7,11,12,13,15,17],mapfil:11,maql:[1,12],maqllex:12,mar:2,marchand:1,marek:[1,2],margaritelli:1,margin:7,mario:1,mark:[1,5,11,18],markdown:[1,2,12],markdownlex:12,marker:12,markup:[2,6,7,13,15,18,21],martin:1,mask:12,maskj:[1,2],masklex:12,mason:[1,2,12,23],masonlex:12,master:12,match:[0,2,11,12,15,18],math:[2,7,12],mathematica:[2,12],mathematicalex:12,mathescap:7,matlab:[1,2,23],matlablex:12,matlabsess:12,matlabsessionlex:12,matt:[1,2],matteo:1,matter:14,matthew:1,matthia:[1,12],mauricio:1,maven:10,mawk:12,max:[1,12],maxim:12,maximum:11,mayb:12,mcdonald:[1,2],mcgregor:1,mckamei:1,mckee:1,mckenna:1,mclaughlin:1,mean:[0,2,3,5,7,11,12,14,16,19,21],meant:18,mechan:[0,7,15,22],media:2,member:[11,12],menlo:7,mention:14,menu:9,menuconfig:12,mercuri:21,merg:[5,11],messag:[12,18],meta:[7,12],metacharact:11,metaclass:[0,6],metadata:14,metamodel:
12,method:[0,2,3,4,5,6,7,11,12,15,18],meuser:1,mher:1,mhtml:12,michael:1,michiel:1,micro:12,microsoft:7,middl:5,might:[6,11,12,17,19],miikka:1,mike:1,miller:1,mime:[0,2],mimelex:12,mimetyp:[0,2,12],minhe:1,minid:[1,2,12,23],minidlex:12,minidsrc:12,minim:[6,12],minimum:2,minor:2,mior:1,mirc:1,misc:2,misdetect:2,mishandl:2,mishighlight:2,miss:[2,11,12],mit:12,mitchen:1,mix:12,mixtur:12,mli:12,mll:12,mly:12,mma:12,mod:12,mode:[2,7,12],modelica:[1,2,12,23],modelicalex:12,modelin:[1,2],modelvers:10,modif:11,modifi:[0,7,15],modul:[0,2,4,5,7,11,12,14,15,18],modula2:12,modula2lex:12,modula:[1,2,23],modulo:12,mof:12,moin:[2,12,13],moinmoin:[2,8,12,23],moinwikilex:12,mondrian:17,mondrianstyl:17,monkei:[2,12],monkeylex:12,mono:[2,7],monofont:7,monokai:[1,2],monospac:7,mont:[1,2],montelex:12,moo:12,moocod:[1,2,12],moocodelex:12,moon:12,moonscript:[1,2,12],moonscriptlex:12,morai:1,more:[2,3,6,7,10,11,12,15,22],morton:1,most:[0,2,6,7,11,12,17,20,22],moura:1,move:12,movsisyan:1,mozhashpreproc:12,mozilla:[2,12],mozpercentpreproc:12,mozpreproc:12,mozpreproccsslex:12,mozpreprochashlex:12,mozpreprocjavascriptlex:12,mozpreprocpercentlex:12,mozpreprocxullex:12,mq4:12,mq5:12,mqh:12,mql4:12,mql5:12,mql:[2,12],mqllexer:12,msc:12,mscgen:[1,2,12],mscgenlex:12,msdo:[1,2,12],msdossessionlex:12,much:[2,11,12],muhamedag:1,mulitpart:12,multi:[2,11,18,21],multilin:[2,11,18],multipart:12,multipl:[2,7,12],mupad:[1,2,12,23],mupadlex:12,must:[0,3,5,6,7,11,12,15,17],muthiah:1,mxml:[1,2],mxmllexer:12,myghti:[1,2,12,23],myghtycsslex:12,myghtyhtmllex:12,myghtyjavascriptlex:12,myghtylex:12,myghtyxmllex:12,mygthi:12,mylex:11,mynewlex:11,mypythonlex:11,mysql:[2,12,23],mysqllex:12,mystyl:17,myt:12,nafu:7,nam:1,name:[0,2,4,5,7,11,12,13,14,15,17],namehighlightfilt:[2,5],namespac:[1,2,18],nasm:[2,12],nasmlex:[2,12],nasmobjdumplex:12,nathan:1,nativ:[7,12],naveen:1,nawk:12,nbp:12,nbsp:18,ncar:2,ncl:12,ncllexer:12,nearest:7,nearli:2,necessari:7,need:[0,2,4,6,7,11,12,14,15,21,22],needl:11,neg:12,nelson:[1,2],nemerl:[1,2,12,23],nemerlelex:12,neo4j:12,nesc:[1,2,12],nesclex:12,nescsrc:12,nest:[2,11,12],net:[2,23],neufeld:1,neujahr:2,never:11,nevertheless:11,new_stat:11,newest:2,newli:[11,18],newlin:[2,5,7,11,12],newlisp:[2,12],newlisplex:12,newspeak:[2,12],newspeaklanguag:12,newspeaklex:12,next:[11,12],ng2:12,nginx:[2,12,23],nginxconflex:[2,12],nguyen:1,nick:1,nil:[1,12],nimrod:[1,2,23],nimrodlex:12,nit:[1,2],nitlex:12,nix:[1,2,7],nixlex:12,nobackground:7,nobodi:18,nobold:17,noclass:[2,7],noclobber_cssfil:[2,7],noehr:1,noinherit:17,noital:17,nolta:1,non:[2,3,7,11,17,19],none:[0,4,6,7,11,12,18],nonempti:7,nontermin:12,nonzero:5,normal:[0,3,4,5,11,12,18],norman:1,north:1,nose:2,notabl:12,note:[4,5,7,11,12,14,15,17,18,19,23],notebook:2,noth:11,notifi:22,notmuch:[2,12,23],notmuchlex:12,nounderlin:17,nov:2,now:[2,7,11,12,19],nowrap:7,nprint:15,nqp:12,nresult:10,nroff:12,ns2:12,nsh:12,nsi:[2,12],nsislex:12,nth:7,nullformatt:[6,7],number:[2,3,5,6,7,11,12,15,17,18,21,22],numer:[2,12],numpi:[1,2,12],numpylex:12,nusmv:[1,2,12],nusmvlex:12,nvidia:12,obei:7,obj:12,objc:[2,12],objdumb:12,objdump:[2,12],objdumplex:12,object:[0,1,2,5,6,7,11,15,18,19,23],objectivec:[1,2,12],objectiveclex:12,objectivecpplex:12,objectivej:12,objectivejlex:12,objectpasc:12,objj:12,objm2:12,obrist:1,obviou:11,ocaml:[2,12,23],ocamllex:12,occasion:19,occur:11,oct:[2,18],octal:18,octav:[1,2,12,23],octavelex:12,odbc:12,odd:2,odin:12,odinlex:12,off:[0,5,12],offload:2,offset:7,often:[11,16],old:[2,17,18],oldhtmlformatt:6,oleh:1,oliva:1,olivi:1,olov:1,omg:12,omit:3,onclick:7,one:[0,2,3,
5,6,7,11,12,15,17,19],ones:[0,11,12],onli:[0,2,3,7,11,12,15,18,21],onto:11,ooc:2,ooclex:12,opa:[1,2,12],opalang:12,opalex:12,open:[6,11,14,22],openbug:12,opencobol:[1,2,12],openedg:[1,2,12],openedgelex:12,opengl:12,openingcommentdelim:12,oper:[1,2,11,17],optim:[2,11],option:[1,2,4,5,6,9,11,12,13,17,19],optionerror:0,optnam:0,orc:12,orchestra:12,order:[2,12,15,21],ordinari:12,org:[1,10,12,22],origin:[2,12],other:[1,2,7,11,13,17,18,19,21,22],otherlex:11,otherst:11,otherwis:[0,7,11,12],out:[2,3,5,10],outencod:[2,3,7,19],outfil:[0,6,7,15],outlin:21,output:[0,2,3,4,5,6,7,11,13,15,18,19,21,22],outsid:[11,12],over:[0,2,6,7,11,15,17,22],overhaul:1,overload:18,overrid:[0,2,3,4,6,7,11,12,19],overridden:0,overview:[15,17],overwrit:6,overwritten:7,owen:1,own:[0,5,7,8,18,23],oxford:1,ozarowski:2,p6l:12,p6m:12,pace:22,pacemak:12,packag:[2,7,11,18],pacman:12,pacmanconf:12,pacmanconflex:12,pad:7,page:[0,2,7,10,11,13,14,15,23],paid:22,paint:21,pair:[0,11],pan:[2,12],panlex:12,pannuto:1,paper:12,paramet:[6,11],parasail:[1,2],parasaillex:12,paren:2,parent:[6,11,18],parenthesi:11,paris:1,pars:[2,6,12,21],parser:[2,13,18],part:[6,11,12,18],partial:2,particular:[3,7,12],partner:12,pas:[3,12],pascal:[2,3,5],pass:[2,3,4,6,7,11,12,15,17,19],past:[7,12],pastebin:[12,21],pastebox:21,pat:1,patch:[1,2,18],path:[7,11,12,18,21],patrick:1,pattern:[0,2,7,11,12,14,15],patx:21,paul:1,paulo:1,pawn:2,pawnlex:12,pcmk:12,pdf:[12,21],peculiar:2,pekka:1,peopl:16,pep:2,pepijn:1,per:[2,3,12,17,19],percent:12,perfect:7,perform:[2,11,12,18],perl6:12,perl6lex:12,perl:[1,2,15,18,22,23],perllex:[12,15],permit:12,persist:12,pete:1,peterson:1,pfannschmidt:1,pgsql:12,phil:1,philosophi:17,php3:12,php4:12,php5:12,php:[1,2,9,11,18,21,23],phpbb:7,phplexer:[11,12],phpygment:[9,21],phtml:12,picheta:1,pick:7,pida:21,piec:[7,15],pierr:[1,2],pig:[1,2,12],piglex:12,pigment:21,pike:[1,2,12],pikelex:12,pil:[2,7],pilcrow:5,pim:12,pinkham:1,piotr:2,pip:[2,20],pipe:[12,15],pixel:7,pixmap:7,pkg:12,pkg_resourc:2,pkgbuild:12,pkgconfig:12,pkgconfiglex:12,pl6:12,place:[2,12,20,22],plain:[2,12,15],platform:[12,20,21],player:12,pleas:[7,8,14,21],plot:2,plpgsql:12,plpgsqllexer:12,plrm:12,plt:12,plu:[2,7,11,12,23],plugin:[8,12,13,17,21],pm6:12,pmod:12,png:[2,7],pocoo:22,pod:2,point:[5,7,11],polici:12,pom:10,poni:[2,23],ponylex:12,pop:[2,11],popular:12,port:2,pos:11,posh:12,posit:[0,2,11],posix:12,possibl:[2,6,7,10,11,12,15,18,19,21],post:7,postgr:12,postgresconsolelex:12,postgreslex:12,postgresql:[1,2,12],postmarkup:21,postscr:12,postscript:[1,2,12,23],postscriptlex:12,pot:12,pov:[2,12,23],povrai:[12,23],povraylex:12,power:[11,21],powershel:[1,2,12,23],powershelllex:12,powershellsessionlex:12,praat:[1,2],praatlex:12,pre:[2,6,7,10,12,15],preambl:7,preced:[2,11],prefer:11,prefix:[2,7,11,12,14,18],preimplement:11,prepar:11,prepend:[3,7],preproc:18,preprocess:12,preprocessor:[2,9,11,12,18],present:[2,12,21],preserv:7,prestyl:[2,7],prettifi:22,prevent:[11,17],previou:11,previous:2,prg:12,primari:[12,15],primit:12,principl:18,print:[3,7,10,12,15,17,18,21],printabl:12,println:10,prioriti:2,privaci:21,pro:[1,2],probabl:[0,4,12],problem:[2,7],proc:12,procedur:[2,11,12],process:[2,3,11],processor:[1,2,9],produc:[0,2,7,11,12,15],profil:12,progopedia:21,program:[2,16,18,22],progress:12,project:[2,7,10,17,21,22],prolog:[2,23],prologlex:12,prompt:[2,12,18],proof:12,proper:11,properli:[2,21],properti:[2,7,12],propertieslex:12,propos:18,proprietari:12,proto:2,protobuf:12,protobuflex:12,protocol:[1,2,12],prototyp:12,prover:12,provid:[0,2,3,6,7,9,11,12,17],prs:12,prynn:1,prypin:1,p
s1:12,ps1con:12,psc:12,pseudo:[11,12,18],psi:12,psl:12,psm1:12,psql:12,pth:2,publicli:11,publish:12,pudb:21,pug:12,puglex:12,pull:[2,23],pumbaa80:2,punctuat:[2,12],puppet:[1,2,12],puppetlex:12,pure:12,purpos:[6,12,15],push:11,put:[7,11,12,13,17],pwn:12,pxd:12,pxi:12,py2:12,py2tb:12,py3:12,py3tb:12,pybtex:12,pycon:12,pygment:[1,3,4,5,6,7,11,14,15,17,18,19,20,22,23],pykleur:2,pypi:[1,2,7,12,20],pypylog:12,pypyloglex:12,pyrex:12,pytb:12,python2:12,python2lex:[2,12],python2tracebacklex:12,python3:[2,12],python3lex:[2,12,17],python3tracebacklex:[2,12],python:[1,2,3,7,9,10,11,13,15,16,18,20,21,22,23],pythonconsolelex:[2,12],pythoninterpret:10,pythonlex:[2,5,10,11,12,15,19],pythontracebacklex:[2,12],pyw:12,pyx:12,qbasic:[2,12],qbasiclex:12,qbs:12,qbzr:21,qdeclarativeintroduct:12,qml:[1,2,12],qmllexer:12,qualifi:12,qualiti:22,quickstart:[2,8],quit:[2,4,7,21,22],quot:[2,3,10,15,18],quotat:5,qvt:[1,2],qvto:12,qvtolex:12,r10:12,r5r:12,rabel:1,racket:[1,2,12],racketlex:12,ragel:[1,2,12,23],ragelclex:12,ragelcpplex:12,rageldlex:12,ragelembeddedlex:12,rageljavalex:12,ragellex:12,ragelobjectiveclex:12,ragelrubylex:12,rai:[2,23],raichoo:1,rainbow:[1,2],rais:[0,2,5,7,12],raiseonerror:5,raiseonerrortokenfilt:[2,5],rake:12,rakefil:12,rang:[0,12,21,22,23],rare:2,rasul:1,rather:11,raw:[2,7,18],rawtokenformatt:[2,7,12],rawtokenlex:[7,12],raytrac:12,rbcon:12,rbnf:12,rbw:12,rbx:12,rconsol:12,rconsolelex:12,rdlexer:12,rdoc:2,read:[7,11,12,22],readabl:0,readili:6,realli:[7,11,12,14,18],reason:12,reb:12,rebol:[1,2,23],rebollex:12,rebuild:11,receiv:2,recent:12,recogn:[0,2,3,11,12],recognis:[7,12,18],recognit:2,recommend:[7,20],record:12,recreat:12,recurs:11,recurss:12,red:[2,6,7,12,15,17,23],redcod:[1,2,12,23],redcodelex:12,redlex:12,reduc:2,redund:15,reed:1,refactor:1,refer:[2,4,6,7,8,11,12,15],reg:12,regard:3,regedit:12,regeditlex:12,regex:[2,18,22],regist:[0,2,4,5,8,12,13,17],registri:[2,12],regress:2,regular:[2,7,11,18,19],reidi:1,rel:[0,3,7,11],relas:2,relaxng:12,releas:[2,20,22,23],reli:14,remov:[2,7,11],renam:[12,13],render:[2,7,9,11,12,17],renviron:12,repeat:11,repl:12,replac:[2,5,7,11],report:12,repositori:21,repr:7,repres:[15,18],represent:7,request:[2,7,21,23],requir:[2,11,12,14,22],requiredelimit:12,reserv:[12,18],resolv:11,resort:[3,19],resourcebundl:[2,12],resourcelex:12,respect:[2,7,17],respons:13,rest:[2,5,8,11,12,22,23],restrict:12,restructur:2,restructuredtext:[2,12,21],result:[0,2,7,10,11,12,15,17,19],retain:2,reuben:1,review:21,revis:12,rewrit:[1,2],rewritten:12,rewrot:2,rex:12,rexx:[1,2,12],rexxlex:12,rfc822:12,rgb:17,rhistori:12,rhtml:12,rhtmllexer:12,richard:1,richardson:1,right:[5,19],rigor:12,rintel:1,risc:12,rkt:12,rktd:12,rktl:12,rnc:12,rnccompactlex:12,rng:12,rob:1,roberg:[1,2],robert:1,roboconf:[1,2],roboconfgraphlex:12,roboconfinstanceslex:12,robot:[1,2,23],robotframework:12,robotframeworklex:12,roff:12,rolling:1,roman:2,ronach:[1,22],ronni:1,roo:1,root:[7,11,12],root_lex:11,rostyslav:1,roughli:7,rout:12,roux:21,row:7,rpf:12,rpm:[2,12,23],rpmspeclex:12,rprofil:12,rql:[2,12],rqllexer:12,rrggbb:6,rrt:2,rsl:[1,2,12],rsllexer:12,rss:12,rst2pdf:21,rst:[12,16,18],rstlexer:12,rtf:[1,2,7,12,15,21,22],rtfformatt:7,rts:12,rtslexer:12,rubi:[1,2,9,11,18,21,23],rubiniu:12,rubyconsolelex:[2,12],rubylex:[12,15],rudolph:1,ruggier:1,rule:[2,3,7,11,12,13,18],run:[0,7,10,11,12,21],runtim:10,runtimeerror:[7,18],rust:[1,2,23],rustlex:12,rvt:12,sage:12,salminen:1,salt:12,saltstack:12,sam:1,same:[2,3,5,6,7,11,12,17,18],sampl:[9,11,13,16],samplemanag:12,san:[2,7],sandalski:1,sane:7,sap:12,sarl:12,sarllex:12,sas:
12,saslex:12,sass:[1,2,12],sasslex:12,sasso:1,save:[2,11],sbatch:12,sbl:12,scala:[1,2,12,23],scalalex:12,scalat:12,scaml:[1,12],scamllex:12,scd:12,scdoc:[2,23],scdoclex:12,sce:12,scenario:8,scene:[2,23],schafer:1,schemaloc:10,scheme:[1,2,12,23],schemelex:12,schutt:[1,12],schwaiger:1,schweizer:1,schweyer:1,sci:12,scientif:[12,21],scilab:[1,2,12,23],scilablex:12,scm:12,sco:12,sconscript:12,sconstruct:12,scope:[10,21],score:[2,12],screen:13,script:[2,3,11,14,15,16,21,23],scss:[2,12],scsslexer:12,search:[2,11],sebastian:1,second:[2,11,12],secondari:15,section:[11,21],see:[0,2,3,5,6,7,11,12,15,17,19,21,22],seem:11,seen:3,sel4:12,select:[0,2,3,7,11,12,15],selector:[2,3,7,15],self:[0,4,6,7,11,12,18],semant:15,semicolon:2,send:[6,11,12,19,22],sensit:2,sep:2,separ:[2,3,7,11,12],sequenc:[0,2,7,15,17,18,21,22],sequenti:12,serial:12,server:[2,12,23],servic:[1,21],session:[1,2,23],set:[2,6,7,9,10,11,12,13,15,17,18,19],setup:3,setuptool:14,sever:[2,11,12],sgf:[1,2,23],shade:17,shader:[12,23],shadow:12,shape:12,share:21,shaw:1,sheet:[3,12],shell:[1,2,3,21,23],shellscript:12,shellsess:[1,12],shellsessionlex:2,shen:[1,2,12],shenlex:12,shex:12,shexc:12,shexclex:12,ship:[6,9,11,13,16,17],shorten:12,should:[0,2,5,6,7,11,12,13,14,17],shouldn:[2,11],show:[2,7,11,12],shown:[7,11],siafoo:21,sieker:1,sig:12,sign:[2,3,5,11],signatur:18,sil:12,silent:2,silver:[2,12],silverlex:12,similar:[2,11,15,18],simmon:1,simon:1,simonov:[1,2],simpl:[2,9,10,11,22],simplefilt:4,simpli:11,simplifi:2,simplist:12,simpson:1,sinc:[0,2,3,4,7,9,10,11,12,15,17,19],sing:1,singl:[2,11,18,19],singlelin:11,singleton:18,sircmpwn:12,site:21,size:[6,7],skip:11,skylark:2,slash:[1,2,11,14,23],slashlex:12,slexer:[2,12],slightli:2,slim:[2,12],slimlex:12,slowdown:12,slowish:2,sls:12,slurm:[2,12,23],slurmbashlex:12,smali:[1,2,12],smalilex:12,small:[2,12,15,17],smaller:2,smalltalk:[1,2,23],smalltalklex:12,smart:[3,19],smarter:2,smartgameformatlex:12,smarti:[12,23],smartylex:12,smishlajev:1,sml:12,smllexer:[11,12],snapshot:10,snippet:[12,15,16,17,21],snobol4:12,snobol:[1,2,23],snobollex:12,snowbal:[1,2,12],snowballlex:12,softwar:[1,12],sol:12,solar:[1,2],solid:[2,23],soliditylex:12,solvabl:11,some:[0,2,4,6,7,11,12,15,17,18,21],somelex:[3,11],someth:[6,10,11,15,18],sometim:[7,11],somewhat:12,sound:11,sourc:[1,2,5,7,9,11,13,14,15,16,19,21,22],sourcecod:[2,7,12,16],sourcepawn:[1,2,12],sourcepawnlex:12,sourceslist:12,sourceslistlex:12,space:[2,3,5,7,11,12,17,23],spacehack:7,spam:[11,15],span:[2,3,7,10,15],sparql:[1,2,12],sparqllex:12,spec:[2,12,23],special:[2,3,5,7,11,18,22],specif:[2,3,7,11],specifi:[0,3,6,7,11,12,17,18],speed:2,spell:11,sphinx:[2,21],spigarelli:1,spitfir:[2,12],split:[0,2,3,12,15,17,18],splitlin:2,splu:12,spt:12,spyder:21,sql:[1,2,23],sqlite3:[2,12],sqlite:23,sqliteconsolelex:12,sqllexer:12,squeak:12,squid:[1,2,12,23],squidconf:12,squidconflex:12,squiggli:2,src:12,ssp:[2,12],ssplexer:12,stabl:22,stack:[2,11],stan:[1,2,12],stand:21,standalon:10,standard:[0,1,2,3,6,7,12,15,21],standard_typ:18,standardml:12,stanlex:12,stap:1,star:11,starlark:2,start:[0,2,6,7,8,11,12,18],starter:11,startinlin:12,stata:[1,2],statalex:12,state1:11,state2:11,state:[2,12],statement:[0,2,12],staticmethod:0,statist:2,statu:12,stdin:12,stdout:[3,19],stefan:[1,12],step:[7,11,21],stepan:1,stephen:1,steve:1,steven:1,still:[2,7,11,12,17,18,19],stingrai:12,stolen:12,store:[7,11,12,21],stou:1,strachan:1,straightforward:6,strang:11,stream:[0,2,3,4,5,6,7,12,15,19],strict:2,string:[0,2,3,5,6,7,10,11,12,15,17,18,19],string_to_tokentyp:18,stringio:7,strip:[11,12,15],stripal:[0,
12,15],stripnl:[0,2,12],strong:18,strongli:12,structur:[2,12],stuart:1,studio:[2,12],stuff:7,style:[0,1,2,5,7,8,11,14,15,18,23],style_map:[0,17],stylebegin:6,styleend:6,styleguid:5,stylemap:6,stylenameclass:17,stylesheet:[2,3,7,13,15],styleshet:12,subclass:[0,2,7,12,17],subdirectori:13,subhead:[11,18],subheadlin:18,submit:8,subpackag:[2,17],subsequ:[0,7,12],subset:12,subsystem:12,subtoken:17,subtyp:18,successor:[21,23],suffix:[2,11],suggest:8,suit:[1,2,11],suitabl:[0,2,7,22],sullivan:1,supercollid:[1,2],supercolliderlex:12,superset:12,supertoken:17,suppli:[12,18],support:[1,2,3,6,7,11,12,15,16,17,18,22],suppos:[7,15],suppress:2,sure:[3,11,12,22],surpris:2,sven:1,svg:[1,2,7,12],svgformatt:7,svh:12,svn:12,swallow:2,swg:12,swift:[1,2,12],swiftlex:12,swig:[1,2,12],swiglex:12,sybas:12,sylvestr:1,symbol:[2,12,18],synonym:11,syntact:[18,21],syntax:[2,3,7,11,17,18,21,22],syntaxerror:2,sys:[3,19],system:[3,7,10,11,20],systemverilog:[1,2,12],systemveriloglex:12,sysutil:12,tab:[2,5,7,12],tabl:[2,7],tabsiz:[0,5,12],tac:12,tad:[1,2,12],tads3:12,tads3lex:12,tag:[2,3,5,6,7,11,12,15,18],tagsfil:7,tagurlformat:7,tail:12,take:[0,2,5,7,11,12,15,20,21,22,23],taken:[0,2,12],tamil:[12,23],tango:[1,2],tanner:1,tap:[1,12],taplex:12,tarbal:[2,11],target:12,task:12,tasm:12,tasmlex:[2,12],tassilo:1,tcl:[1,2,23],tcllexer:12,tcsh:[1,2,12,23],tcshcon:12,tcshlexer:12,tcshsessionlex:12,tea:[1,2,12,23],team:[17,22],teatemplatelex:12,techniqu:11,ted:1,tell:[7,11],templat:[0,2,11,15],tenani:1,teng:[1,2],tera:[1,2,23],teraterm:[2,12],teratermlex:12,teratermmacro:12,term:[1,2,11,15,23],termcap:12,termcaplex:12,termin:[1,2,3,7,12,15,19],terminal16m:7,terminal256:[1,2,7],terminal256formatt:[7,17],terminalformatt:[2,3,7],terminaltruecolorformatt:7,terminfo:12,terminfolex:12,ternari:2,terraform:[1,2,12],terraformi:12,terraformlex:12,test:[1,2,3,15,18],testcas:7,testcaseformatt:7,tex:[7,12,23],texcom:7,texlex:12,text:[0,2,3,7,11,15,16,17,18,19,21,22],textedit:12,textfmt:12,textlex:12,than:[2,9,11,17],thank:[1,2,7,10],thei:[0,2,6,7,11,12,14,15,17,18],them:[0,2,5,7,8,11,12,15,18,21,22,23],theme:[2,17],themselv:11,theori:7,therefor:[3,7,12],thi:[0,2,3,5,6,7,10,11,12,13,14,15,16,17,18,19,20,22,23],thing:11,think:[0,19],third:11,thoma:1,thoroughli:21,those:[6,11,12,14,18],though:[12,21],three:2,thrift:[1,2,12],thriftlex:12,through:[3,11,15],thu:[7,17],thurgood:[1,12],thy:12,tiberiu:[1,2],ticket:[2,22],tiffin:1,tim:[1,2,22],time:[2,7,11],timhatch:1,timothi:1,titl:[0,7],tmp:11,tmpl:12,toc:12,todo:[2,5,12],todotxt:12,todotxtlex:12,togeth:[7,12,15],toggl:7,token:[0,2,3,4,5,6,7,8,12,15,17],token_typ:6,tokenmerg:5,tokenmergefilt:5,tokensourc:[0,6],tokenstr:7,tokentyp:[0,5,7,11],tolbert:1,toler:11,tom:1,toml:[1,2,12,23],tomllex:12,too:[2,3,11,12],tool:[2,12,21,22],top:[7,11,13],toplevel:11,topmost:11,total:[5,11],totaldownload:12,tpl:12,trac:[2,12,21,23],traceback:[1,2,12,18,23],tracker:[2,21,22],traffic:12,trafficscript:1,trail:[12,15],trailer:2,trait:12,transact:[1,12],transactsqllex:12,transcript:12,transfer:12,transform:12,translat:2,transpar:17,treat:[2,12,17],treat_stdlib_adts_as_builtin:12,tree:12,treetop:[1,2,12],treetoplex:12,trevor:1,tri:[2,3,11,19],trick:[8,15],tricki:11,trigger:7,troff:12,trove:2,trust:11,trute:1,tryzelaar:1,tspan:7,tsql:12,tst:12,tsx:12,ttl:12,ttype:[4,6],tupl:[0,6,7,11,12],turbo:12,turbopasc:12,turn:12,turtl:[1,2,12],turtlelex:12,tutori:10,twig:[2,12],twightmllex:12,twiglex:12,two:[6,7,11,12,15],twowaybind:12,txt:[2,7,12],type:[0,2,5,6,7,11,12,15,17,18],typescript:[1,2,12],typescriptlex:12,typeset:[7,12],typic:12
,typo3:12,typo3cm:12,typoscript:[1,2],typoscriptcssdata:12,typoscriptcssdatalex:12,typoscripthtmldata:12,typoscripthtmldatalex:12,typoscriptlex:12,typoscriptrefer:12,ucodelex:12,udalov:1,udiff:[2,12],udo:12,unabl:11,unbalanc:12,unchang:7,uncolor:4,uncolorfilt:4,under:[11,20,21],underlin:[6,7,12,17],underscor:2,understand:[7,14,17],undocu:12,unexpect:19,unfold:12,unfortun:11,unhandl:12,unicod:[2,5,7,8,11,12,15],unicodedecodeerror:19,unicodeerror:2,unicodelevel:12,unicon:2,uniconlex:12,unifi:[2,12],uniqu:[0,15],unistr:2,unit:[2,12],univers:[1,21],unix:12,unknown:12,unless:7,unlex:12,unlimit:[12,18],unmatch:12,unmodifi:11,unnecessari:11,unnecessarili:12,unpack:11,unquot:2,unsign:2,unstyl:18,until:11,untouch:12,unus:18,updat:[1,2,7,12],upper:[3,5],uppercas:[5,15,18],urbi:12,urbiscript:[1,2],urbiscriptlex:12,usabl:[0,6,17,21,22],usag:[0,3,21],use:[2,3,4,5,6,7,10,11,12,13,14,15,16,17,18,19,22],used:[0,2,3,4,5,6,7,11,12,13,14,15,17,18,21],useful:[2,5,7,12,18,21],usepackag:7,user:[0,7,9,12],uses:[0,2,4,7,9,10,11,15],usesyslog:12,using:[0,2,3,4,7,11,12,15,16,17,18,19,20,21],usr:15,usual:[7,15,20],utf8:2,utf:[2,7,10,12,19],util:[0,2,4,21],v4_0_0:10,vala:[1,2,12,23],valalex:12,valentin:1,valid:[0,12,21],vallentin:1,valu:[0,2,3,4,5,6,7,11,12,14,17,18],valueerror:2,van:1,vapi:12,vari:17,variabl:[2,7,10,11,12,18],variant:[12,17],varieti:17,variou:[0,2,8,23],vark:12,varnish:[1,2],varrazzo:1,varun:1,vba:12,vbnet:12,vbnetaspxlex:12,vbnetlex:12,vbs:12,vbscript:[1,2,12],vbscriptlex:12,vcl:12,vcllexer:12,vclsnippet:12,vclsnippetlex:12,vclsrc:12,vctreestatu:12,vctreestatuslex:12,veloc:[2,12],velocityhtmllex:12,velocitylex:12,velocityxmllex:12,vera:7,verbatim:[2,7],verbopt:7,verbosepkglist:12,veri:[0,4,11,12,22],verilog:[2,12,23],veriloglex:12,version:[0,3,4,5,7,10,11,12,15,17,18,19,21,23],versionad:12,vert:12,vfp:12,vgl:[1,2,12],vgllexer:12,vhd:12,vhdl:[1,2,12,23],vhdllexer:12,via:[7,9,12,19],view:[11,12],viewer:[7,21],viewvc:21,vim:[1,2,12,23],viml:12,vimlex:12,vimrc:12,vincent:1,vinot:1,virtualenv:2,visibl:[2,5],visiblewhitespacefilt:[2,5,18],vision:12,visit:15,visual:[1,2,23],vnd:12,voelker:1,volunt:22,vpr:12,wai:[2,7,11,17,18,19],wait:21,want:[3,5,7,10,11,12,13,14,15,17,18],wasn:11,watch:23,wavemetr:12,wdiff:[1,2,12],wdifflex:12,web:[11,21],webmisc:12,websit:2,weechat:[2,12],weechatlog:12,weight:[7,15],weizenbaum:1,welcom:[15,23],well:[2,6,7,12,21,23],were:[2,14],what:[2,3,15],wheel:2,when:[2,5,7,11,12,15,17,22],where:[0,6,7,11,13,15,17],whether:[0,2,7],whetsel:1,which:[0,2,3,5,7,10,11,12,14,15,17,18,19,20],whilei:[1,2],whileylex:12,white:17,whitespac:[0,2,5,7,11,12,15,17,18],whitnei:1,whole:[6,7,11],whose:[2,12,13,15],why:[4,21,23],wide:[12,21,22],width:7,wiki:[2,12,13,21,22,23],wikipedia:21,wildcard:3,william:1,willing:13,winbatch:12,winbug:12,window:[2,7,12,23],winkler:1,winner:2,winston:2,winter:[1,12],wish:21,within:[0,7,12],without:[0,2,7,11,12,14,18],wlua:12,wolfram:12,won:[3,4,7,11],word:[2,3,5,7,11,12,18],wordpress:21,work:[2,3,5,7,10,11,12,15,17,20],workaround:7,workspac:12,world:[3,10,15,17],would:[5,6,7,10,11,12,18],wouldn:7,wppygment:21,wpygment:21,wrap:[2,6,7,15],wrapcod:7,wrapper:[2,7,21],write:[0,2,5,7,8,12,14,15,18,19,21,23],written:[0,1,2,3,7,11,12,14,15,17,19,21],wrong:19,wsdl:12,wsf:12,wstokentyp:5,www:[10,12],wxhtmlpygment:21,wybir:1,x10:1,x10lexer:12,x1b:17,x1z:12,xbase:12,xchat:[2,12],xcode:2,xds:12,xhtml:[12,21],xmi:12,xml:[2,7,10,18,23],xmldjangolex:12,xmlerblex:12,xmllexer:12,xmln:10,xmlphplexer:12,xmlschema:10,xmlsmartylex:12,xoffset:7,xorglex:12,xpl:12,xql:12,xqm:12,xqueri:[1,2,1
2,23],xquerylex:12,xqy:12,xsd:[10,12],xsi:10,xsl:12,xslt:[1,2,12,23],xsltlexer:12,xten:12,xtend:[1,2,12],xtendlex:12,xtlang:12,xtlanglex:12,xtm:12,xul:12,xwiki:21,xxd:12,xxx:5,yai:2,yaml:[1,2,12,23],yamljinjalex:12,yamllex:12,yellow:17,yes:[0,11],yet:3,yield:[0,2,4,6,7,11,12],yml:12,yoffset:7,you:[0,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22],young:1,your:[3,5,7,8,9,12,13,14,16,17,18,23],your_formatt:3,your_lex:[3,11],your_lexer_fil:11,your_named_lex:11,yourapp:17,yourfilt:14,yourformatt:14,yourlex:14,yourmodul:[14,17],yourself:[11,21],yourstyl:[14,17],ystep:7,zamboni:1,zamudio:1,zeek:[2,12,23],zeeklex:12,zeitdilat:2,zep:12,zephir:[12,23],zephirlex:12,zero:12,zerodivisionerror:12,zig:[2,23],ziglang:12,ziglex:12,zimin:1,zimmerman:1,zimtstern:2,zip:12,zsh:12,zshrc:12,zurczak:1},titles:["The full Pygments API","Full contributor list","Pygments changelog","Command Line Interface","Write your own filter","Filters","Write your own formatter","Available formatters","Pygments documentation","Using Pygments in various scenarios","Use Pygments in Java","Write your own lexer","Available lexers","Using Pygments with MoinMoin","Register Plugins","Introduction and Quickstart","Using Pygments in ReST documents","Styles","Builtin Tokens","Unicode and Encodings","Download and installation","Pygments FAQ","Welcome!","Supported languages"],titleterms:{"0rc1":2,"6rc1":2,"class":[7,11],"new":[11,21],Adding:11,RTS:12,SAS:12,The:[0,11,14,22],Use:[10,14],Using:[4,9,11,13,16],actionscript:12,advanc:11,algebra:12,all:[12,23],ambienttalk:12,ampl:12,api:0,apl:12,archetyp:12,architectur:15,assembl:12,author:22,autom:12,avail:[7,12,17],bash:9,basic:12,bibliographi:12,bibtex:12,bnf:12,boa:12,bug:21,builtin:[5,17,18],busi:12,callback:11,can:21,cap:12,chang:11,changelog:2,chapel:12,clean:12,code:12,come:21,command:[3,12,15],comment:18,common:7,compact:12,complet:9,comput:12,config:12,configur:12,consol:12,contribut:22,contributor:1,core:14,creat:17,crystal:12,csound:12,css:12,custom:3,dalvik:12,data:12,decor:4,definit:[6,12],deleg:11,deriv:11,descriptor:12,develop:20,dialect:12,diff:12,document:[8,16],doe:21,domain:12,download:20,dsl:12,dump:12,dylan:12,ecl:12,eiffel:12,elm:12,embed:12,encod:[3,19],engin:12,entrypoint:14,erlang:12,esoter:12,exampl:15,extend:14,extendedregexlex:11,extens:12,ezhil:12,factor:12,famili:12,fantom:12,faq:21,featur:21,felix:12,fiction:12,file:12,filter:[3,4,5],flag:11,floscript:12,format:12,formatt:[0,3,6,7,15],formerli:12,forth:12,fortran:12,foxpro:12,framework:12,freefem:12,from:[11,21],full:[0,1],game:12,gener:[3,6,12,18],get:[3,17],googl:12,grammer:12,graph:12,graphic:12,guess:15,handl:11,hardwar:12,haskel:12,hax:12,help:3,hexadecim:12,high:0,how:[14,21],html:[6,12],icon:12,idl:12,igor:12,includ:12,inferno:12,instal:[12,20],interact:12,interfac:3,intermedi:12,internet:12,introduct:15,iter:12,ivl:12,java:10,javascript:12,julia:12,jvm:12,keyword:[11,18],known:12,languag:[9,12,21,23],level:0,lexer:[0,3,11,12,15],like:12,line:[3,15],lispi:12,list:[1,11,17],liter:18,lookup:15,macro:12,mail:12,make:21,makefil:12,man:12,markdown:9,markup:[12,23],matlab:12,microsoft:12,mime:12,misc:12,model:12,modifi:11,modula:12,moinmoin:13,mont:12,multi:12,multipl:11,multipurpos:12,mxml:12,name:[3,18,21],ncar:12,net:12,nim:12,nimrod:12,nit:12,nix:12,nixo:12,non:12,notat:12,note:3,oberon:12,object:12,onc:11,ooc:12,oper:[12,18],option:[0,3,7,15],orient:12,other:[9,12,23],output:12,over:12,own:[4,6,11,17],packag:[12,20],page:12,parasail:12,parser:12,pascal:12,patch:12,pawn:12,perl:12,php:12,plot:12,plugin:
14,poni:12,praat:12,pro:12,process:[0,12,21],program:[12,21,23],prolog:12,proto:12,prove:12,punctuat:18,pygment:[0,2,8,9,10,12,13,16,21],python:12,queri:12,quickstart:[6,15],qvt:12,raw:12,rdf:12,rebol:12,regex:11,regexlex:11,regist:14,relat:12,relax:12,report:21,requir:21,resourc:12,rest:16,riverb:12,roboconf:12,robot:12,rubi:12,rule:17,rust:12,scan:11,scdoc:12,scenario:9,schema:12,script:12,semant:12,session:12,sgf:12,shell:12,similar:12,simpl:12,slash:12,smalltalk:12,smart:12,smv:12,snobol:12,solid:12,sourc:[12,20],special:12,specif:12,sql:12,stata:12,state:11,stream:11,stuff:12,style:[3,6,12,17,21],stylesheet:12,subclass:[4,11],suggest:21,supercollid:12,support:[21,23],syntax:12,system:[12,21],tcl:12,templat:[12,23],tera:12,term:12,termin:17,test:[11,12],text:12,textmat:9,than:12,theorem:12,thi:21,token:[11,18],trafficscript:12,trick:11,typoscript:12,ucod:12,unicod:19,unicon:12,urbiscript:12,usag:15,use:21,uses:21,variou:[9,12],varnish:12,verif:12,version:[2,20],visual:12,want:21,web:12,welcom:22,what:21,where:21,whilei:12,who:21,wrapper:9,write:[4,6,11],x10:12,xml:12,xorg:12,your:[4,6,11],zig:12}}) \ No newline at end of file diff --git a/doc/_static/demo.css b/doc/_static/demo.css new file mode 100644 index 0000000..9344291 --- /dev/null +++ b/doc/_static/demo.css @@ -0,0 +1,38 @@ +#try { + background-color: #f6f6f6; + border-radius: 0; + border: 1px solid #ccc; + margin-top: 15px; + padding: 10px 15px 5px 10px; + position: relative; +} + +#try h2 { + margin-top: 0; +} + +#try textarea { + border: 1px solid #999; + padding: 2px; + width: 100%; + min-height: 150px; +} + +#hlcode pre { + background-color: transparent; + border-radius: 0; +} + +#loading { + position: absolute; + top: 0; + left: 0; + width: 100%; + height: 100%; + margin: auto auto; + background-color: #cccccccc; + display: flex; + flex-direction: column; + justify-content: center; + text-align: center; +} diff --git a/doc/_static/demo.js b/doc/_static/demo.js new file mode 100644 index 0000000..f538492 --- /dev/null +++ b/doc/_static/demo.js @@ -0,0 +1,100 @@ +languagePluginLoader.then(() => { + // pyodide is now ready to use... 
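+    // The steps below load the Pygments wheel into the Pyodide runtime,
+    // import the library, and populate the language and style dropdowns
+    // from pygments.lexers.get_all_lexers() and pygments.styles.get_all_styles().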
+ pyodide.loadPackage('Pygments').then(() => { + pyodide.runPython('import pygments.lexers, pygments.formatters.html, pygments.styles'); + + var lexerlist = pyodide.runPython('list(pygments.lexers.get_all_lexers())'); + var sel = document.getElementById("lang"); + for (lex of lexerlist) { + var opt = document.createElement("option"); + opt.text = lex[0]; + opt.value = lex[1][0]; + sel.add(opt); + } + + var stylelist = pyodide.runPython('list(pygments.styles.get_all_styles())'); + var sel = document.getElementById("style"); + for (sty of stylelist) { + if (sty != "default") { + var opt = document.createElement("option"); + opt.text = sty; + opt.value = sty; + sel.add(opt); + } + } + + document.getElementById("hlbtn").disabled = false; + document.getElementById("loading").style.display = "none"; + }); +}); + +function new_file() { + pyodide.globals['fname'] = document.getElementById("file").files[0].name; + var alias = pyodide.runPython('pygments.lexers.find_lexer_class_for_filename(fname).aliases[0]'); + var sel = document.getElementById("lang"); + for (var i = 0; i < sel.length; i++) { + if (sel.options[i].value == alias) { + sel.selectedIndex = i; + reset_err_hl(); + break; + } + } +} + +function reset_err_hl() { + document.getElementById("aroundlang").style.backgroundColor = null; +} + +function highlight() { + var select = document.getElementById("lang"); + var alias = select.options.item(select.selectedIndex).value + + if (alias == "") { + document.getElementById("aroundlang").style.backgroundColor = "#ffcccc"; + return; + } + pyodide.globals['alias'] = alias; + + var select = document.getElementById("style"); + pyodide.globals['style'] = select.options.item(select.selectedIndex).value; + + pyodide.runPython('lexer = pygments.lexers.get_lexer_by_name(alias)'); + pyodide.runPython('fmter = pygments.formatters.html.HtmlFormatter(noclasses=True, style=style)'); + + var file = document.getElementById("file").files[0]; + if (file) { + file.arrayBuffer().then(function(buf) { + pyodide.globals['code_mem'] = buf; + pyodide.runPython('code = bytes(code_mem)'); + highlight_now(); + }); + } else { + pyodide.globals['code'] = document.getElementById("code").value; + highlight_now(); + } +} + +function highlight_now() { + var out = document.getElementById("hlcode"); + out.innerHTML = pyodide.runPython('pygments.highlight(code, lexer, fmter)'); + document.location.hash = "#try"; + document.getElementById("hlcodedl").style.display = "block"; +} + +function download_code() { + var filename = "highlighted.html"; + var hlcode = document.getElementById("hlcode").innerHTML; + var blob = new Blob([hlcode], {type: 'text/html'}); + if (window.navigator.msSaveOrOpenBlob) { + window.navigator.msSaveBlob(blob, filename); + } + else{ + var elem = window.document.createElement('a'); + elem.href = window.URL.createObjectURL(blob); + elem.download = filename; + document.body.appendChild(elem); + elem.click(); + document.body.removeChild(elem); + window.URL.revokeObjectURL(elem.href); + } +} diff --git a/doc/_static/github.png b/doc/_static/github.png new file mode 100644 index 0000000..5d146ad Binary files /dev/null and b/doc/_static/github.png differ diff --git a/doc/_static/spinner.gif b/doc/_static/spinner.gif new file mode 100644 index 0000000..2212db9 Binary files /dev/null and b/doc/_static/spinner.gif differ diff --git a/doc/_templates/demo.html b/doc/_templates/demo.html new file mode 100644 index 0000000..bc788d1 --- /dev/null +++ b/doc/_templates/demo.html @@ -0,0 +1,53 @@ +{% extends "layout.html" %} +{% 
set sidebars = sidebars + ["demo_sidebar.html"] %} + +{% block extrahead %} +{{ super() }} + + + + +{% endblock %} + +{% block htmltitle %}Demo{{ titlesuffix }}{% endblock %} + +{% block body %} +{{ body }} + +

+[demo page body: the template's HTML markup was lost in extraction;
+ the recoverable text of the added lines follows]
+Demo - Try it out!
+The highlighting here is performed in-browser using
+a WebAssembly translation of Pygments, courtesy of
+Pyodide.
+Your content is neither sent over the web nor stored anywhere.
+Enter code and select a language
+[file picker · language selector · style selector · "Highlight" button]
+or enter code below:
+[code textarea; highlighted output area with a download link]
+Loading Python...  [overlay hidden once Pyodide finishes loading]
+ + +{% endblock %} diff --git a/doc/_templates/demo_sidebar.html b/doc/_templates/demo_sidebar.html new file mode 100644 index 0000000..3f2a86c --- /dev/null +++ b/doc/_templates/demo_sidebar.html @@ -0,0 +1 @@ +

Back to top
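The demo above drives Pygments through Pyodide: demo.js embeds Python
statements in pyodide.runPython() calls. The same three-step pipeline in
plain CPython looks like the minimal sketch below; it is not part of this
commit, and the sample source string and the 'monokai' style name are
illustrative assumptions.

    # Sketch of the pipeline demo.js runs in the browser via Pyodide.
    from pygments import highlight
    from pygments.lexers import get_lexer_by_name
    from pygments.formatters.html import HtmlFormatter

    code = 'print("Hello World")'         # stands in for the demo textarea
    lexer = get_lexer_by_name('python')   # alias picked in the language dropdown
    fmter = HtmlFormatter(noclasses=True, style='monokai')
    print(highlight(code, lexer, fmter))  # HTML fragment inserted into #hlcode

With noclasses=True the formatter emits inline style attributes, which is
why the demo can inject the returned fragment without shipping a separate
stylesheet.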

diff --git a/doc/_templates/index_with_try.html b/doc/_templates/index_with_try.html new file mode 100644 index 0000000..e69de29 diff --git a/doc/_templates/indexsidebar.html b/doc/_templates/indexsidebar.html index 2995455..5544f98 100644 --- a/doc/_templates/indexsidebar.html +++ b/doc/_templates/indexsidebar.html @@ -3,23 +3,22 @@

This documentation is for version {{ version }}, which is not released yet.

You can use it from the
-  Mercurial repo or look for
-  released versions in the Python
+  Git repo or look for
+  released versions in the Python
   Package Index.

{% else %}

Current version: {{ version }}

Get Pygments from the Python Package
-Index, or install it with:

+ Index, or install it with:

pip install Pygments
{% endif %}

Questions? Suggestions?

-

-Clone at Bitbucket
-or come to the #pocoo channel on FreeNode.

+

+ Clone at GitHub.

You can also open an issue at the
-tracker.

+ tracker.

- + project

diff --git a/doc/conf.py b/doc/conf.py index 00db7d9..3ab5c2e 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -35,7 +35,7 @@ master_doc = 'index' # General information about the project. project = u'Pygments' -copyright = u'2015, Georg Brandl' +copyright = u'2006-2019, Georg Brandl and Pygments contributors' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -97,7 +97,7 @@ html_theme_path = ['_themes'] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -#html_title = None +html_title = 'Pygments' # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None @@ -125,12 +125,14 @@ html_static_path = ['_static'] #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. -html_sidebars = {'index': ['indexsidebar.html'], - 'docs/*': ['docssidebar.html']} +html_sidebars = {'index': ['indexsidebar.html', 'searchbox.html']} # Additional templates that should be rendered to pages, maps page names to # template names. -#html_additional_pages = {} +if os.environ.get('WEBSITE_BUILD'): + html_additional_pages = { + 'demo': 'demo.html', + } # If false, no module index is generated. #html_domain_indices = True @@ -159,7 +161,7 @@ html_sidebars = {'index': ['indexsidebar.html'], #html_file_suffix = None # Output file base name for HTML help builder. -htmlhelp_basename = 'Pygmentsdoc' +htmlhelp_basename = 'Pygments' # -- Options for LaTeX output -------------------------------------------------- @@ -178,8 +180,8 @@ latex_elements = { # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ - ('index', 'Pygments.tex', u'Pygments Documentation', - u'Georg Brandl', 'manual'), + ('docs/index', 'Pygments.tex', u'Pygments Documentation', + u'Pygments authors', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of @@ -208,34 +210,21 @@ latex_documents = [ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ - ('index', 'pygments', u'Pygments Documentation', - [u'Georg Brandl'], 1) + ('docs/index', 'pygments', u'Pygments Documentation', + [u'Pygments authors'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False -# -- Options for Texinfo output ------------------------------------------------ - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ('index', 'Pygments', u'Pygments Documentation', - u'Georg Brandl', 'Pygments', 'One line description of project.', - 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. -#texinfo_appendices = [] +# Example configuration for intersphinx: refer to the Python standard library. +#intersphinx_mapping = {'http://docs.python.org/': None} -# If false, no module index is generated. -#texinfo_domain_indices = True -# How to display URL addresses: 'footnote', 'no', or 'inline'. -#texinfo_show_urls = 'footnote' +def pg_context(app, pagename, templatename, ctx, event_arg): + ctx['demo_active'] = bool(os.environ.get('WEBSITE_BUILD')) -# Example configuration for intersphinx: refer to the Python standard library. 
-#intersphinx_mapping = {'http://docs.python.org/': None} +def setup(app): + app.connect('html-page-context', pg_context) diff --git a/doc/docs/index.rst b/doc/docs/index.rst index 30d5c08..4cf710f 100644 --- a/doc/docs/index.rst +++ b/doc/docs/index.rst @@ -50,17 +50,12 @@ Pygments documentation integrate **About Pygments** - + .. toctree:: :maxdepth: 1 changelog authors - -If you find bugs or have suggestions for the documentation, please look -:ref:`here ` for info on how to contact the team. - -.. XXX You can download an offline version of this documentation from the - :doc:`download page `. - +If you find bugs or have suggestions for the documentation, please submit them +on `GitHub `. diff --git a/doc/docs/lexerdevelopment.rst b/doc/docs/lexerdevelopment.rst index 63bd01a..5b6813f 100644 --- a/doc/docs/lexerdevelopment.rst +++ b/doc/docs/lexerdevelopment.rst @@ -136,7 +136,7 @@ have to perform the following steps. First, change to the current directory containing the Pygments source code. You will need to have either an unpacked source tarball, or (preferably) a copy -cloned from BitBucket. +cloned from GitHub. .. code-block:: console @@ -611,7 +611,7 @@ possibility to influence the position. There are not really any simple examples for lexer callbacks, but you can see them in action e.g. in the `SMLLexer` class in `ml.py`_. -.. _ml.py: http://bitbucket.org/birkenfeld/pygments-main/src/tip/pygments/lexers/ml.py +.. _ml.py: https://github.com/pygments/pygments/blob/master/pygments/lexers/ml.py The ExtendedRegexLexer class @@ -667,7 +667,7 @@ For example, this is how the hypothetical lexer above would be written with the This might sound confusing (and it can really be). But it is needed, and for an example look at the Ruby lexer in `ruby.py`_. -.. _ruby.py: https://bitbucket.org/birkenfeld/pygments-main/src/tip/pygments/lexers/ruby.py +.. _ruby.py: https://github.com/pygments/pygments/blob/master/pygments/lexers/ruby.py Handling Lists of Keywords diff --git a/doc/download.rst b/doc/download.rst index cf32f48..975c41b 100644 --- a/doc/download.rst +++ b/doc/download.rst @@ -18,17 +18,15 @@ manager as usual. Development sources ------------------- -We're using the `Mercurial `_ version control -system. You can get the development source using this command:: +We're using the Git version control system. You can get the development source +using this command:: - hg clone http://bitbucket.org/birkenfeld/pygments-main pygments + git clone https://github.com/pygments/pygments -Development takes place at `Bitbucket -`_, you can browse the source -online `here `_. +Development takes place at `GitHub `_. The latest changes in the development source code are listed in the `changelog -`_. +`_. .. Documentation ------------- @@ -36,6 +34,6 @@ The latest changes in the development source code are listed in the `changelog .. XXX todo You can download the documentation either as - a bunch of rst files from the Mercurial repository, see above, or + a bunch of rst files from the Git repository, see above, or as a tar.gz containing rendered HTML files:

pygmentsdocs.tar.gz
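The lexerdevelopment.rst hunk above mentions lexer callbacks, as used by
SMLLexer in ml.py. The minimal sketch below illustrates that mechanism
using the standard RegexLexer API; it is not part of this commit, and
TinyConfLexer, key_callback and the two key names are invented for
illustration.

    from pygments.lexer import RegexLexer
    from pygments.token import Keyword, Name, Text

    def key_callback(lexer, match):
        # A callback replaces a fixed token type: it receives the lexer
        # and the regex match, and yields (index, tokentype, value).
        word = match.group(0)
        ttype = Keyword if word in ('host', 'port') else Name.Attribute
        yield match.start(), ttype, word

    class TinyConfLexer(RegexLexer):
        name = 'TinyConf'
        aliases = ['tinyconf']
        tokens = {
            'root': [
                (r'[A-Za-z_]\w*(?=\s*=)', key_callback),
                (r'.|\n', Text),
            ],
        }

    # e.g.: list(TinyConfLexer().get_tokens('host = example.org\n'))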

diff --git a/doc/faq.rst b/doc/faq.rst index 172929e..108cef4 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -62,9 +62,9 @@ Please see the :doc:`documentation on styles `. How can I report a bug or suggest a feature? -------------------------------------------- -Please report bugs and feature wishes in the tracker at Bitbucket. +Please report bugs and feature wishes in the tracker at GitHub. -You can also e-mail the author or use IRC, see the contact details. +You can also e-mail the authors, see the contact details. I want this support for this language! -------------------------------------- @@ -132,8 +132,9 @@ This is an (incomplete) list of projects and sites known to use the Pygments hig * `Clygments `_, a pygments wrapper for Clojure * `PHPygments `_, a pygments wrapper for PHP - +* `Spyder `_, the Scientific Python Development + Environment, uses pygments for the multi-language syntax highlighting in its + `editor `_. If you have a project or web site using Pygments, drop me a line, and I'll add a link here. - diff --git a/doc/index.rst b/doc/index.rst index 2611404..d89277e 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -26,15 +26,9 @@ Like every open-source project, we are always looking for volunteers to help us with programming. Python knowledge is required, but don't fear: Python is a very clear and easy to learn language. -Development takes place on `Bitbucket -`_, where the Mercurial -repository, tickets and pull requests can be viewed. +Development takes place on `GitHub `_. -Our primary communication instrument is the IRC channel **#pocoo** on the -Freenode network. To join it, let your IRC client connect to -``irc.freenode.net`` and do ``/join #pocoo``. - -If you found a bug, just open a ticket in the Bitbucket tracker. Be sure to log +If you found a bug, just open a ticket in the GitHub tracker. Be sure to log in to be notified when the issue is fixed -- development is not fast-paced as the library is quite stable. You can also send an e-mail to the developers, see below. @@ -42,7 +36,8 @@ below. The authors ----------- -Pygments is maintained by **Georg Brandl**, e-mail address *georg*\ *@*\ *python.org*. +Pygments is maintained by **Georg Brandl**, e-mail address *georg*\ *@*\ *python.org* +and **Matthäus Chajdas**. Many lexers and fixes have been contributed by **Armin Ronacher**, the rest of the `Pocoo `_ team and **Tim Hatch**. diff --git a/doc/languages.rst b/doc/languages.rst index b06ccc5..a91664c 100644 --- a/doc/languages.rst +++ b/doc/languages.rst @@ -38,6 +38,7 @@ Programming languages * Delphi * Dylan * `Elm `_ +* Email * Erlang * `Ezhil `_ Ezhil - A Tamil programming language * Factor @@ -69,6 +70,7 @@ Programming languages * MuPad * Nemerle * Nimrod +* Notmuch * Objective-C * Objective-J * Octave @@ -88,6 +90,7 @@ Programming languages * Rust * S, S-Plus, R * Scala +* `Scdoc `_ * Scheme * Scilab * `SGF `_ @@ -95,6 +98,7 @@ Programming languages * `Slurm `_ * Smalltalk * SNOBOL +* `Solidity `_ * Tcl * `Tera Term language `_ * `TOML `_ @@ -104,7 +108,9 @@ Programming languages * Visual Basic.NET * Visual FoxPro * XQuery +* `Zeek `_ * Zephir +* `Zig `_ Template languages ------------------ @@ -162,9 +168,9 @@ Other markup ... that's all? --------------- -Well, why not write your own? Contributing to Pygments is easy and fun. Take a look at the -:doc:`docs on lexer development ` and -:ref:`contact details `. +Well, why not write your own? Contributing to Pygments is easy and fun. Take a +look at the :doc:`docs on lexer development `. 
Pull +requests are welcome on `GitHub `. Note: the languages listed here are supported in the development version. The latest release may lack a few of them. diff --git a/external/autopygmentize b/external/autopygmentize index d2d0597..8a2e7a6 100755 --- a/external/autopygmentize +++ b/external/autopygmentize @@ -1,6 +1,6 @@ #!/bin/bash # Best effort auto-pygmentization with transparent decompression -# by Reuben Thomas 2008-2016 +# by Reuben Thomas 2008-2019 # This program is in the public domain. # Strategy: first see if pygmentize can find a lexer; if not, ask file; if that finds nothing, fail @@ -15,7 +15,7 @@ file_common_opts="--brief --dereference" lexer=$(pygmentize -N "$file") if [[ "$lexer" == text ]]; then - unset lexer + # Try to do better than just "text" case $(file --mime-type --uncompress $file_common_opts "$file") in application/xml|image/svg+xml) lexer=xml;; application/javascript) lexer=javascript;; @@ -66,36 +66,42 @@ if [[ "$lexer" == text ]]; then esac fi -# Find a preprocessor for compressed files +# Find a concatenator for compressed files concat=cat case $(file $file_common_opts --mime-type "$file") in - application/x-gzip) concat=zcat;; + application/gzip) concat=zcat;; application/x-bzip2) concat=bzcat;; application/x-xz) concat=xzcat;; esac -# Find a suitable lexer, preceded by a hex dump for binary files +# Find a suitable reader, preceded by a hex dump for binary files, +# or fmt for text with very long lines prereader="" +reader=cat encoding=$(file --mime-encoding --uncompress $file_common_opts "$file") -if [[ $encoding == "binary" ]]; then - prereader="od -x" # POSIX fallback - if [[ -n $(which hd) ]]; then - prereader="hd" # preferred - fi - lexer=hexdump - encoding=latin1 -fi -if [[ -n "$lexer" ]]; then +# FIXME: need a way to switch between hex and text view, as file often +# misdiagnoses files when they contain a few control characters +# if [[ $encoding == "binary" ]]; then +# prereader="od -x" # POSIX fallback +# if [[ -n $(which hd) ]]; then +# prereader=hd # preferred +# fi +# lexer=hexdump +# encoding=latin1 +#el +# FIXME: Using fmt does not work well for system logs +# if [[ "$lexer" == "text" ]]; then +# if file "$file" | grep -ql "text, with very long lines"; then +# reader=fmt +# fi +# fi +if [[ "$lexer" != "text" ]]; then reader="pygmentize -O inencoding=$encoding $PYGMENTIZE_OPTS $options -l $lexer" fi -# If we found a reader, run it -if [[ -n "$reader" ]]; then - if [[ -n "$prereader" ]]; then - exec $concat "$file" | $prereader | $reader - else - exec $concat "$file" | $reader - fi +# Run the reader +if [[ -n "$prereader" ]]; then + exec $concat "$file" | $prereader | $reader +else + exec $concat "$file" | $reader fi - -exit 1 diff --git a/external/rst-directive.py b/external/rst-directive.py index 0b7831f..e0c39b3 100644 --- a/external/rst-directive.py +++ b/external/rst-directive.py @@ -64,7 +64,7 @@ class Pygments(Directive): required_arguments = 1 optional_arguments = 0 final_argument_whitespace = True - option_spec = dict([(key, directives.flag) for key in VARIANTS]) + option_spec = {key: directives.flag for key in VARIANTS} has_content = True def run(self): diff --git a/pygmentize b/pygmentize deleted file mode 100755 index aea3872..0000000 --- a/pygmentize +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/env python2 - -import sys -import pygments.cmdline -try: - sys.exit(pygments.cmdline.main(sys.argv)) -except KeyboardInterrupt: - sys.exit(1) diff --git a/pygments/__init__.py b/pygments/__init__.py index 15c226a..b28da13 100644 --- 
a/pygments/__init__.py +++ b/pygments/__init__.py @@ -17,10 +17,10 @@ * it is usable as a command-line tool and as a library * ... and it highlights even Brainfuck! - The `Pygments tip`_ is installable with ``easy_install Pygments==dev``. + The `Pygments master branch`_ is installable with ``easy_install Pygments==dev``. - .. _Pygments tip: - http://bitbucket.org/birkenfeld/pygments-main/get/tip.zip#egg=Pygments-dev + .. _Pygments master branch: + https://github.com/pygments/pygments/archive/master.zip#egg=Pygments-dev :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. @@ -29,7 +29,7 @@ import sys from pygments.util import StringIO, BytesIO -__version__ = '2.4.2' +__version__ = '2.5.1' __docformat__ = 'restructuredtext' __all__ = ['lex', 'format', 'highlight'] diff --git a/pygments/__main__.py b/pygments/__main__.py new file mode 100644 index 0000000..cd80a2d --- /dev/null +++ b/pygments/__main__.py @@ -0,0 +1,7 @@ +import sys +import pygments.cmdline + +try: + sys.exit(pygments.cmdline.main(sys.argv)) +except KeyboardInterrupt: + sys.exit(1) diff --git a/pygments/cmdline.py b/pygments/cmdline.py index 292cb87..34752d6 100644 --- a/pygments/cmdline.py +++ b/pygments/cmdline.py @@ -554,7 +554,7 @@ def main(args=sys.argv): file=sys.stderr) print('Please report the whole traceback to the issue tracker at', file=sys.stderr) - print('.', + print('.', file=sys.stderr) print('*' * 65, file=sys.stderr) print(file=sys.stderr) diff --git a/pygments/formatters/html.py b/pygments/formatters/html.py index d65c09c..042f04c 100644 --- a/pygments/formatters/html.py +++ b/pygments/formatters/html.py @@ -435,7 +435,7 @@ class HtmlFormatter(Formatter): self.linenostep = abs(get_int_opt(options, 'linenostep', 1)) self.linenospecial = abs(get_int_opt(options, 'linenospecial', 0)) self.nobackground = get_bool_opt(options, 'nobackground', False) - self.lineseparator = options.get('lineseparator', '\n') + self.lineseparator = options.get('lineseparator', u'\n') self.lineanchors = options.get('lineanchors', '') self.linespans = options.get('linespans', '') self.anchorlinenos = options.get('anchorlinenos', False) diff --git a/pygments/formatters/img.py b/pygments/formatters/img.py index de0ea0a..6bb3364 100644 --- a/pygments/formatters/img.py +++ b/pygments/formatters/img.py @@ -46,9 +46,9 @@ STYLES = { } # A sane default for modern systems -DEFAULT_FONT_NAME_NIX = 'Bitstream Vera Sans Mono' +DEFAULT_FONT_NAME_NIX = 'DejaVu Sans Mono' DEFAULT_FONT_NAME_WIN = 'Courier New' -DEFAULT_FONT_NAME_MAC = 'Courier New' +DEFAULT_FONT_NAME_MAC = 'Menlo' class PilNotAvailable(ImportError): @@ -125,8 +125,8 @@ class FontManager(object): for font_dir in (os.path.join(os.getenv("HOME"), 'Library/Fonts/'), '/Library/Fonts/', '/System/Library/Fonts/'): font_map.update( - ((os.path.splitext(f)[0].lower(), os.path.join(font_dir, f)) - for f in os.listdir(font_dir) if f.lower().endswith('ttf'))) + (os.path.splitext(f)[0].lower(), os.path.join(font_dir, f)) + for f in os.listdir(font_dir) if f.lower().endswith('ttf')) for name in STYLES['NORMAL']: path = self._get_mac_font_path(font_map, self.font_name, name) @@ -237,7 +237,8 @@ class ImageFormatter(Formatter): bold and italic fonts will be generated. This really should be a monospace font to look sane. - Default: "Bitstream Vera Sans Mono" on Windows, Courier New on \\*nix + Default: "Courier New" on Windows, "Menlo" on Mac OS, and + "DejaVu Sans Mono" on \\*nix `font_size` The font size in points to be used. 
@@ -521,7 +522,8 @@ class ImageFormatter(Formatter): rectw = self.image_pad + self.line_number_width - self.line_number_pad draw.rectangle([(0, 0), (rectw, recth)], fill=self.line_number_bg) - draw.line([(rectw, 0), (rectw, recth)], fill=self.line_number_fg) + if self.line_number_separator: + draw.line([(rectw, 0), (rectw, recth)], fill=self.line_number_fg) del draw def format(self, tokensource, outfile): diff --git a/pygments/formatters/other.py b/pygments/formatters/other.py index a2eca91..c09eff0 100644 --- a/pygments/formatters/other.py +++ b/pygments/formatters/other.py @@ -10,7 +10,7 @@ """ from pygments.formatter import Formatter -from pygments.util import OptionError, get_choice_opt +from pygments.util import get_choice_opt from pygments.token import Token from pygments.console import colorize @@ -87,14 +87,17 @@ class RawTokenFormatter(Formatter): if self.compress == 'gz': import gzip outfile = gzip.GzipFile('', 'wb', 9, outfile) + def write(text): outfile.write(text.encode()) flush = outfile.flush elif self.compress == 'bz2': import bz2 compressor = bz2.BZ2Compressor(9) + def write(text): outfile.write(compressor.compress(text.encode())) + def flush(): outfile.write(compressor.flush()) outfile.flush() @@ -115,14 +118,15 @@ class RawTokenFormatter(Formatter): write("%s\t%r\n" % (ttype, value)) flush() + TESTCASE_BEFORE = u'''\ - def testNeedsName(self): + def testNeedsName(lexer): fragment = %r tokens = [ ''' TESTCASE_AFTER = u'''\ ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + assert list(lexer.get_tokens(fragment)) == tokens ''' diff --git a/pygments/formatters/rtf.py b/pygments/formatters/rtf.py index 0dae6aa..1246db2 100644 --- a/pygments/formatters/rtf.py +++ b/pygments/formatters/rtf.py @@ -35,7 +35,7 @@ class RtfFormatter(Formatter): ``'default'``). `fontface` - The used font famliy, for example ``Bitstream Vera Sans``. Defaults to + The used font family, for example ``Bitstream Vera Sans``. Defaults to some generic font which is supposed to have fixed width. `fontsize` @@ -70,7 +70,7 @@ class RtfFormatter(Formatter): .replace(u'}', u'\\}') def _escape_text(self, text): - # empty strings, should give a small performance improvment + # empty strings, should give a small performance improvement if not text: return u'' diff --git a/pygments/lexers/__init__.py b/pygments/lexers/__init__.py index 1baf93b..1cdf25d 100644 --- a/pygments/lexers/__init__.py +++ b/pygments/lexers/__init__.py @@ -20,9 +20,13 @@ from pygments.modeline import get_filetype_from_buffer from pygments.plugin import find_plugin_lexers from pygments.util import ClassNotFound, itervalues, guess_decode, text_type +COMPAT = { + 'Python3Lexer': 'PythonLexer', + 'Python3TracebackLexer': 'PythonTracebackLexer', +} __all__ = ['get_lexer_by_name', 'get_lexer_for_filename', 'find_lexer_class', - 'guess_lexer', 'load_lexer_from_file'] + list(LEXERS) + 'guess_lexer', 'load_lexer_from_file'] + list(LEXERS) + list(COMPAT) _lexer_cache = {} _pattern_cache = {} @@ -327,6 +331,8 @@ class _automodule(types.ModuleType): cls = _lexer_cache[info[1]] setattr(self, name, cls) return cls + if name in COMPAT: + return getattr(self, COMPAT[name]) raise AttributeError(name) diff --git a/pygments/lexers/_asy_builtins.py b/pygments/lexers/_asy_builtins.py index 6ac79ad..b76c22a 100644 --- a/pygments/lexers/_asy_builtins.py +++ b/pygments/lexers/_asy_builtins.py @@ -14,7 +14,7 @@ :license: BSD, see LICENSE for details. 
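The ``COMPAT`` table added to ``pygments/lexers/__init__.py`` earlier in this diff keeps the renamed lexer classes importable under their old names: ``_automodule.__getattr__`` resolves ``Python3Lexer`` to the new ``PythonLexer`` on demand. A standalone sketch of that aliasing pattern, with an illustrative module name and placeholder object that are not part of Pygments:

    import types

    class _AliasingModule(types.ModuleType):
        # old attribute name -> current attribute name
        COMPAT = {'Python3Lexer': 'PythonLexer'}

        def __getattr__(self, name):
            # Only reached when normal attribute lookup fails.
            if name in self.COMPAT:
                return getattr(self, self.COMPAT[name])
            raise AttributeError(name)

    mod = _AliasingModule('demo')
    mod.PythonLexer = object()                  # the current name
    assert mod.Python3Lexer is mod.PythonLexer  # old name still works
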
""" -ASYFUNCNAME = set(( +ASYFUNCNAME = { 'AND', 'Arc', 'ArcArrow', @@ -1038,9 +1038,9 @@ ASYFUNCNAME = set(( 'ztick', 'ztick3', 'ztrans' -)) +} -ASYVARNAME = set(( +ASYVARNAME = { 'AliceBlue', 'Align', 'Allow', @@ -1642,4 +1642,4 @@ ASYVARNAME = set(( 'ylabelwidth', 'zerotickfuzz', 'zerowinding' -)) +} diff --git a/pygments/lexers/_cl_builtins.py b/pygments/lexers/_cl_builtins.py index d0306fa..7722e81 100644 --- a/pygments/lexers/_cl_builtins.py +++ b/pygments/lexers/_cl_builtins.py @@ -9,7 +9,7 @@ :license: BSD, see LICENSE for details. """ -BUILTIN_FUNCTIONS = set(( # 638 functions +BUILTIN_FUNCTIONS = { # 638 functions '<', '<=', '=', '>', '>=', '-', '/', '/=', '*', '+', '1-', '1+', 'abort', 'abs', 'acons', 'acos', 'acosh', 'add-method', 'adjoin', 'adjustable-array-p', 'adjust-array', 'allocate-instance', @@ -157,17 +157,17 @@ BUILTIN_FUNCTIONS = set(( # 638 functions 'wild-pathname-p', 'write', 'write-byte', 'write-char', 'write-line', 'write-sequence', 'write-string', 'write-to-string', 'yes-or-no-p', 'y-or-n-p', 'zerop', -)) +} -SPECIAL_FORMS = set(( +SPECIAL_FORMS = { 'block', 'catch', 'declare', 'eval-when', 'flet', 'function', 'go', 'if', 'labels', 'lambda', 'let', 'let*', 'load-time-value', 'locally', 'macrolet', 'multiple-value-call', 'multiple-value-prog1', 'progn', 'progv', 'quote', 'return-from', 'setq', 'symbol-macrolet', 'tagbody', 'the', 'throw', 'unwind-protect', -)) +} -MACROS = set(( +MACROS = { 'and', 'assert', 'call-method', 'case', 'ccase', 'check-type', 'cond', 'ctypecase', 'decf', 'declaim', 'defclass', 'defconstant', 'defgeneric', 'define-compiler-macro', 'define-condition', 'define-method-combination', @@ -188,19 +188,19 @@ MACROS = set(( 'with-input-from-string', 'with-open-file', 'with-open-stream', 'with-output-to-string', 'with-package-iterator', 'with-simple-restart', 'with-slots', 'with-standard-io-syntax', -)) +} -LAMBDA_LIST_KEYWORDS = set(( +LAMBDA_LIST_KEYWORDS = { '&allow-other-keys', '&aux', '&body', '&environment', '&key', '&optional', '&rest', '&whole', -)) +} -DECLARATIONS = set(( +DECLARATIONS = { 'dynamic-extent', 'ignore', 'optimize', 'ftype', 'inline', 'special', 'ignorable', 'notinline', 'type', -)) +} -BUILTIN_TYPES = set(( +BUILTIN_TYPES = { 'atom', 'boolean', 'base-char', 'base-string', 'bignum', 'bit', 'compiled-function', 'extended-char', 'fixnum', 'keyword', 'nil', 'signed-byte', 'short-float', 'single-float', 'double-float', 'long-float', @@ -217,9 +217,9 @@ BUILTIN_TYPES = set(( 'simple-type-error', 'simple-warning', 'stream-error', 'storage-condition', 'style-warning', 'type-error', 'unbound-variable', 'unbound-slot', 'undefined-function', 'warning', -)) +} -BUILTIN_CLASSES = set(( +BUILTIN_CLASSES = { 'array', 'broadcast-stream', 'bit-vector', 'built-in-class', 'character', 'class', 'complex', 'concatenated-stream', 'cons', 'echo-stream', 'file-stream', 'float', 'function', 'generic-function', 'hash-table', @@ -229,4 +229,4 @@ BUILTIN_CLASSES = set(( 'standard-generic-function', 'standard-method', 'standard-object', 'string-stream', 'stream', 'string', 'structure-class', 'structure-object', 'symbol', 'synonym-stream', 't', 'two-way-stream', 'vector', -)) +} diff --git a/pygments/lexers/_cocoa_builtins.py b/pygments/lexers/_cocoa_builtins.py index f7c55c6..2cf4443 100644 --- a/pygments/lexers/_cocoa_builtins.py +++ b/pygments/lexers/_cocoa_builtins.py @@ -14,9 +14,9 @@ from __future__ import print_function -COCOA_INTERFACES = set(['UITableViewCell', 'HKCorrelationQuery', 'NSURLSessionDataTask', 'PHFetchOptions', 
'NSLinguisticTagger', 'NSStream', 'AVAudioUnitDelay', 'GCMotion', 'SKPhysicsWorld', 'NSString', 'CMAttitude', 'AVAudioEnvironmentDistanceAttenuationParameters', 'HKStatisticsCollection', 'SCNPlane', 'CBPeer', 'JSContext', 'SCNTransaction', 'SCNTorus', 'AVAudioUnitEffect', 'UICollectionReusableView', 'MTLSamplerDescriptor', 'AVAssetReaderSampleReferenceOutput', 'AVMutableCompositionTrack', 'GKLeaderboard', 'NSFetchedResultsController', 'SKRange', 'MKTileOverlayRenderer', 'MIDINetworkSession', 'UIVisualEffectView', 'CIWarpKernel', 'PKObject', 'MKRoute', 'MPVolumeView', 'UIPrintInfo', 'SCNText', 'ADClient', 'PKPayment', 'AVMutableAudioMix', 'GLKEffectPropertyLight', 'WKScriptMessage', 'AVMIDIPlayer', 'PHCollectionListChangeRequest', 'UICollectionViewLayout', 'NSMutableCharacterSet', 'SKPaymentTransaction', 'NEOnDemandRuleConnect', 'NSShadow', 'SCNView', 'NSURLSessionConfiguration', 'MTLVertexAttributeDescriptor', 'CBCharacteristic', 'HKQuantityType', 'CKLocationSortDescriptor', 'NEVPNIKEv2SecurityAssociationParameters', 'CMStepCounter', 'NSNetService', 'AVAssetWriterInputMetadataAdaptor', 'UICollectionView', 'UIViewPrintFormatter', 'SCNLevelOfDetail', 'CAShapeLayer', 'MCPeerID', 'MPRatingCommand', 'WKNavigation', 'NSDictionary', 'NSFileVersion', 'CMGyroData', 'AVAudioUnitDistortion', 'CKFetchRecordsOperation', 'SKPhysicsJointSpring', 'SCNHitTestResult', 'AVAudioTime', 'CIFilter', 'UIView', 'SCNConstraint', 'CAPropertyAnimation', 'MKMapItem', 'MPRemoteCommandCenter', 'PKPaymentSummaryItem', 'UICollectionViewFlowLayoutInvalidationContext', 'UIInputViewController', 'PKPass', 'SCNPhysicsBehavior', 'MTLRenderPassColorAttachmentDescriptor', 'MKPolygonRenderer', 'CKNotification', 'JSValue', 'PHCollectionList', 'CLGeocoder', 'NSByteCountFormatter', 'AVCaptureScreenInput', 'MPFeedbackCommand', 'CAAnimation', 'MKOverlayPathView', 'UIActionSheet', 'UIMotionEffectGroup', 'NSLengthFormatter', 'UIBarItem', 'SKProduct', 'AVAssetExportSession', 'NSKeyedUnarchiver', 'NSMutableSet', 'SCNPyramid', 'PHAssetCollection', 'MKMapView', 'HMHomeManager', 'CATransition', 'MTLCompileOptions', 'UIVibrancyEffect', 'CLCircularRegion', 'MKTileOverlay', 'SCNShape', 'ACAccountCredential', 'SKPhysicsJointLimit', 'MKMapSnapshotter', 'AVMediaSelectionGroup', 'NSIndexSet', 'CBPeripheralManager', 'CKRecordZone', 'AVAudioRecorder', 'NSURL', 'CBCentral', 'NSNumber', 'AVAudioOutputNode', 'MTLVertexAttributeDescriptorArray', 'MKETAResponse', 'SKTransition', 'SSReadingList', 'HKSourceQuery', 'UITableViewRowAction', 'UITableView', 'SCNParticlePropertyController', 'AVCaptureStillImageOutput', 'GCController', 'AVAudioPlayerNode', 'AVAudioSessionPortDescription', 'NSHTTPURLResponse', 'NEOnDemandRuleEvaluateConnection', 'SKEffectNode', 'HKQuantity', 'GCControllerElement', 'AVPlayerItemAccessLogEvent', 'SCNBox', 'NSExtensionContext', 'MKOverlayRenderer', 'SCNPhysicsVehicle', 'NSDecimalNumber', 'EKReminder', 'MKPolylineView', 'CKQuery', 'AVAudioMixerNode', 'GKAchievementDescription', 'EKParticipant', 'NSBlockOperation', 'UIActivityItemProvider', 'CLLocation', 'NSBatchUpdateRequest', 'PHContentEditingOutput', 'PHObjectChangeDetails', 'HKWorkoutType', 'MPMoviePlayerController', 'AVAudioFormat', 'HMTrigger', 'MTLRenderPassDepthAttachmentDescriptor', 'SCNRenderer', 'GKScore', 'UISplitViewController', 'HKSource', 'NSURLConnection', 'ABUnknownPersonViewController', 'SCNTechnique', 'UIMenuController', 'NSEvent', 'SKTextureAtlas', 'NSKeyedArchiver', 'GKLeaderboardSet', 'NSSimpleCString', 'AVAudioPCMBuffer', 'CBATTRequest', 'GKMatchRequest', 
'AVMetadataObject', 'SKProductsRequest', 'UIAlertView', 'NSIncrementalStore', 'MFMailComposeViewController', 'SCNFloor', 'NSSortDescriptor', 'CKFetchNotificationChangesOperation', 'MPMovieAccessLog', 'NSManagedObjectContext', 'AVAudioUnitGenerator', 'WKBackForwardList', 'SKMutableTexture', 'AVCaptureAudioDataOutput', 'ACAccount', 'AVMetadataItem', 'MPRatingCommandEvent', 'AVCaptureDeviceInputSource', 'CLLocationManager', 'MPRemoteCommand', 'AVCaptureSession', 'UIStepper', 'UIRefreshControl', 'NEEvaluateConnectionRule', 'CKModifyRecordsOperation', 'UICollectionViewTransitionLayout', 'CBCentralManager', 'NSPurgeableData', 'PKShippingMethod', 'SLComposeViewController', 'NSHashTable', 'MKUserTrackingBarButtonItem', 'UILexiconEntry', 'CMMotionActivity', 'SKAction', 'SKShader', 'AVPlayerItemOutput', 'MTLRenderPassAttachmentDescriptor', 'UIDocumentInteractionController', 'UIDynamicItemBehavior', 'NSMutableDictionary', 'UILabel', 'AVCaptureInputPort', 'NSExpression', 'CAInterAppAudioTransportView', 'SKMutablePayment', 'UIImage', 'PHCachingImageManager', 'SCNTransformConstraint', 'HKCorrelationType', 'UIColor', 'SCNGeometrySource', 'AVCaptureAutoExposureBracketedStillImageSettings', 'UIPopoverBackgroundView', 'UIToolbar', 'NSNotificationCenter', 'UICollectionViewLayoutAttributes', 'AVAssetReaderOutputMetadataAdaptor', 'NSEntityMigrationPolicy', 'HMUser', 'NSLocale', 'NSURLSession', 'SCNCamera', 'NSTimeZone', 'UIManagedDocument', 'AVMutableVideoCompositionLayerInstruction', 'AVAssetTrackGroup', 'NSInvocationOperation', 'ALAssetRepresentation', 'AVQueuePlayer', 'HMServiceGroup', 'UIPasteboard', 'PHContentEditingInput', 'NSLayoutManager', 'EKCalendarChooser', 'EKObject', 'CATiledLayer', 'GLKReflectionMapEffect', 'NSManagedObjectID', 'NSEnergyFormatter', 'SLRequest', 'HMCharacteristic', 'AVPlayerLayer', 'MTLRenderPassDescriptor', 'SKPayment', 'NSPointerArray', 'AVAudioMix', 'SCNLight', 'MCAdvertiserAssistant', 'MKMapSnapshotOptions', 'HKCategorySample', 'AVAudioEnvironmentReverbParameters', 'SCNMorpher', 'AVTimedMetadataGroup', 'CBMutableCharacteristic', 'NSFetchRequest', 'UIDevice', 'NSManagedObject', 'NKAssetDownload', 'AVOutputSettingsAssistant', 'SKPhysicsJointPin', 'UITabBar', 'UITextInputMode', 'NSFetchRequestExpression', 'HMActionSet', 'CTSubscriber', 'PHAssetChangeRequest', 'NSPersistentStoreRequest', 'UITabBarController', 'HKQuantitySample', 'AVPlayerItem', 'AVSynchronizedLayer', 'MKDirectionsRequest', 'NSMetadataItem', 'UIPresentationController', 'UINavigationItem', 'PHFetchResultChangeDetails', 'PHImageManager', 'AVCaptureManualExposureBracketedStillImageSettings', 'UIStoryboardPopoverSegue', 'SCNLookAtConstraint', 'UIGravityBehavior', 'UIWindow', 'CBMutableDescriptor', 'NEOnDemandRuleDisconnect', 'UIBezierPath', 'UINavigationController', 'ABPeoplePickerNavigationController', 'EKSource', 'AVAssetWriterInput', 'AVPlayerItemTrack', 'GLKEffectPropertyTexture', 'NSHTTPCookie', 'NSURLResponse', 'SKPaymentQueue', 'NSAssertionHandler', 'MKReverseGeocoder', 'GCControllerAxisInput', 'NSArray', 'NSOrthography', 'NSURLSessionUploadTask', 'NSCharacterSet', 'AVMutableVideoCompositionInstruction', 'AVAssetReaderOutput', 'EAGLContext', 'WKFrameInfo', 'CMPedometer', 'MyClass', 'CKModifyBadgeOperation', 'AVCaptureAudioFileOutput', 'SKEmitterNode', 'NSMachPort', 'AVVideoCompositionCoreAnimationTool', 'PHCollection', 'SCNPhysicsWorld', 'NSURLRequest', 'CMAccelerometerData', 'NSNetServiceBrowser', 'CLFloor', 'AVAsynchronousVideoCompositionRequest', 'SCNGeometry', 'SCNIKConstraint', 'CIKernel', 
'CAGradientLayer', 'HKCharacteristicType', 'NSFormatter', 'SCNAction', 'CATransaction', 'CBUUID', 'UIStoryboard', 'MPMediaLibrary', 'UITapGestureRecognizer', 'MPMediaItemArtwork', 'NSURLSessionTask', 'AVAudioUnit', 'MCBrowserViewController', 'UIFontDescriptor', 'NSRelationshipDescription', 'HKSample', 'WKWebView', 'NSMutableAttributedString', 'NSPersistentStoreAsynchronousResult', 'MPNowPlayingInfoCenter', 'MKLocalSearch', 'EAAccessory', 'HKCorrelation', 'CATextLayer', 'NSNotificationQueue', 'UINib', 'GLKTextureLoader', 'HKObjectType', 'NSValue', 'NSMutableIndexSet', 'SKPhysicsContact', 'NSProgress', 'AVPlayerViewController', 'CAScrollLayer', 'GKSavedGame', 'NSTextCheckingResult', 'PHObjectPlaceholder', 'SKConstraint', 'EKEventEditViewController', 'NSEntityDescription', 'NSURLCredentialStorage', 'UIApplication', 'SKDownload', 'SCNNode', 'MKLocalSearchRequest', 'SKScene', 'UISearchDisplayController', 'NEOnDemandRule', 'MTLRenderPassStencilAttachmentDescriptor', 'CAReplicatorLayer', 'UIPrintPageRenderer', 'EKCalendarItem', 'NSUUID', 'EAAccessoryManager', 'NEOnDemandRuleIgnore', 'SKRegion', 'AVAssetResourceLoader', 'EAWiFiUnconfiguredAccessoryBrowser', 'NSUserActivity', 'CTCall', 'UIPrinterPickerController', 'CIVector', 'UINavigationBar', 'UIPanGestureRecognizer', 'MPMediaQuery', 'ABNewPersonViewController', 'CKRecordZoneID', 'HKAnchoredObjectQuery', 'CKFetchRecordZonesOperation', 'UIStoryboardSegue', 'ACAccountType', 'GKSession', 'SKVideoNode', 'PHChange', 'SKReceiptRefreshRequest', 'GCExtendedGamepadSnapshot', 'MPSeekCommandEvent', 'GCExtendedGamepad', 'CAValueFunction', 'SCNCylinder', 'NSNotification', 'NSBatchUpdateResult', 'PKPushCredentials', 'SCNPhysicsSliderJoint', 'AVCaptureDeviceFormat', 'AVPlayerItemErrorLog', 'NSMapTable', 'NSSet', 'CMMotionManager', 'GKVoiceChatService', 'UIPageControl', 'UILexicon', 'MTLArrayType', 'AVAudioUnitReverb', 'MKGeodesicPolyline', 'AVMutableComposition', 'NSLayoutConstraint', 'UIPrinter', 'NSOrderedSet', 'CBAttribute', 'PKPushPayload', 'NSIncrementalStoreNode', 'EKEventStore', 'MPRemoteCommandEvent', 'UISlider', 'UIBlurEffect', 'CKAsset', 'AVCaptureInput', 'AVAudioEngine', 'MTLVertexDescriptor', 'SKPhysicsBody', 'NSOperation', 'PKPaymentPass', 'UIImageAsset', 'MKMapCamera', 'SKProductsResponse', 'GLKEffectPropertyMaterial', 'AVCaptureDevice', 'CTCallCenter', 'CABTMIDILocalPeripheralViewController', 'NEVPNManager', 'HKQuery', 'SCNPhysicsContact', 'CBMutableService', 'AVSampleBufferDisplayLayer', 'SCNSceneSource', 'SKLightNode', 'CKDiscoveredUserInfo', 'NSMutableArray', 'MTLDepthStencilDescriptor', 'MTLArgument', 'NSMassFormatter', 'CIRectangleFeature', 'PKPushRegistry', 'NEVPNConnection', 'MCNearbyServiceBrowser', 'NSOperationQueue', 'MKPolylineRenderer', 'HKWorkout', 'NSValueTransformer', 'UICollectionViewFlowLayout', 'MPChangePlaybackRateCommandEvent', 'NSEntityMapping', 'SKTexture', 'NSMergePolicy', 'UITextInputStringTokenizer', 'NSRecursiveLock', 'AVAsset', 'NSUndoManager', 'AVAudioUnitSampler', 'NSItemProvider', 'SKUniform', 'MPMediaPickerController', 'CKOperation', 'MTLRenderPipelineDescriptor', 'EAWiFiUnconfiguredAccessory', 'NSFileCoordinator', 'SKRequest', 'NSFileHandle', 'NSConditionLock', 'UISegmentedControl', 'NSManagedObjectModel', 'UITabBarItem', 'SCNCone', 'MPMediaItem', 'SCNMaterial', 'EKRecurrenceRule', 'UIEvent', 'UITouch', 'UIPrintInteractionController', 'CMDeviceMotion', 'NEVPNProtocol', 'NSCompoundPredicate', 'HKHealthStore', 'MKMultiPoint', 'HKSampleType', 'UIPrintFormatter', 'AVAudioUnitEQFilterParameters', 'SKView', 
'NSConstantString', 'UIPopoverController', 'CKDatabase', 'AVMetadataFaceObject', 'UIAccelerometer', 'EKEventViewController', 'CMAltitudeData', 'MTLStencilDescriptor', 'UISwipeGestureRecognizer', 'NSPort', 'MKCircleRenderer', 'AVCompositionTrack', 'NSAsynchronousFetchRequest', 'NSUbiquitousKeyValueStore', 'NSMetadataQueryResultGroup', 'AVAssetResourceLoadingDataRequest', 'UITableViewHeaderFooterView', 'CKNotificationID', 'AVAudioSession', 'HKUnit', 'NSNull', 'NSPersistentStoreResult', 'MKCircleView', 'AVAudioChannelLayout', 'NEVPNProtocolIKEv2', 'WKProcessPool', 'UIAttachmentBehavior', 'CLBeacon', 'NSInputStream', 'NSURLCache', 'GKPlayer', 'NSMappingModel', 'CIQRCodeFeature', 'AVMutableVideoComposition', 'PHFetchResult', 'NSAttributeDescription', 'AVPlayer', 'MKAnnotationView', 'PKPaymentRequest', 'NSTimer', 'CBDescriptor', 'MKOverlayView', 'AVAudioUnitTimePitch', 'NSSaveChangesRequest', 'UIReferenceLibraryViewController', 'SKPhysicsJointFixed', 'UILocalizedIndexedCollation', 'UIInterpolatingMotionEffect', 'UIDocumentPickerViewController', 'AVAssetWriter', 'NSBundle', 'SKStoreProductViewController', 'GLKViewController', 'NSMetadataQueryAttributeValueTuple', 'GKTurnBasedMatch', 'AVAudioFile', 'UIActivity', 'NSPipe', 'MKShape', 'NSMergeConflict', 'CIImage', 'HKObject', 'UIRotationGestureRecognizer', 'AVPlayerItemLegibleOutput', 'AVAssetImageGenerator', 'GCControllerButtonInput', 'CKMarkNotificationsReadOperation', 'CKSubscription', 'MPTimedMetadata', 'NKIssue', 'UIScreenMode', 'HMAccessoryBrowser', 'GKTurnBasedEventHandler', 'UIWebView', 'MKPolyline', 'JSVirtualMachine', 'AVAssetReader', 'NSAttributedString', 'GKMatchmakerViewController', 'NSCountedSet', 'UIButton', 'WKNavigationResponse', 'GKLocalPlayer', 'MPMovieErrorLog', 'AVSpeechUtterance', 'HKStatistics', 'UILocalNotification', 'HKBiologicalSexObject', 'AVURLAsset', 'CBPeripheral', 'NSDateComponentsFormatter', 'SKSpriteNode', 'UIAccessibilityElement', 'AVAssetWriterInputGroup', 'HMZone', 'AVAssetReaderAudioMixOutput', 'NSEnumerator', 'UIDocument', 'MKLocalSearchResponse', 'UISimpleTextPrintFormatter', 'PHPhotoLibrary', 'CBService', 'UIDocumentMenuViewController', 'MCSession', 'QLPreviewController', 'CAMediaTimingFunction', 'UITextPosition', 'ASIdentifierManager', 'AVAssetResourceLoadingRequest', 'SLComposeServiceViewController', 'UIPinchGestureRecognizer', 'PHObject', 'NSExtensionItem', 'HKSampleQuery', 'MTLRenderPipelineColorAttachmentDescriptorArray', 'MKRouteStep', 'SCNCapsule', 'NSMetadataQuery', 'AVAssetResourceLoadingContentInformationRequest', 'UITraitCollection', 'CTCarrier', 'NSFileSecurity', 'UIAcceleration', 'UIMotionEffect', 'MTLRenderPipelineReflection', 'CLHeading', 'CLVisit', 'MKDirectionsResponse', 'HMAccessory', 'MTLStructType', 'UITextView', 'CMMagnetometerData', 'UICollisionBehavior', 'UIProgressView', 'CKServerChangeToken', 'UISearchBar', 'MKPlacemark', 'AVCaptureConnection', 'NSPropertyMapping', 'ALAssetsFilter', 'SK3DNode', 'AVPlayerItemErrorLogEvent', 'NSJSONSerialization', 'AVAssetReaderVideoCompositionOutput', 'ABPersonViewController', 'CIDetector', 'GKTurnBasedMatchmakerViewController', 'MPMediaItemCollection', 'SCNSphere', 'NSCondition', 'NSURLCredential', 'MIDINetworkConnection', 'NSFileProviderExtension', 'NSDecimalNumberHandler', 'NSAtomicStoreCacheNode', 'NSAtomicStore', 'EKAlarm', 'CKNotificationInfo', 'AVAudioUnitEQ', 'UIPercentDrivenInteractiveTransition', 'MKPolygon', 'AVAssetTrackSegment', 'MTLVertexAttribute', 'NSExpressionDescription', 'HKStatisticsCollectionQuery', 'NSURLAuthenticationChallenge', 
'NSDirectoryEnumerator', 'MKDistanceFormatter', 'UIAlertAction', 'NSPropertyListSerialization', 'GKPeerPickerController', 'UIUserNotificationSettings', 'UITableViewController', 'GKNotificationBanner', 'MKPointAnnotation', 'MTLRenderPassColorAttachmentDescriptorArray', 'NSCache', 'SKPhysicsJoint', 'NSXMLParser', 'UIViewController', 'PKPaymentToken', 'MFMessageComposeViewController', 'AVAudioInputNode', 'NSDataDetector', 'CABTMIDICentralViewController', 'AVAudioUnitMIDIInstrument', 'AVCaptureVideoPreviewLayer', 'AVAssetWriterInputPassDescription', 'MPChangePlaybackRateCommand', 'NSURLComponents', 'CAMetalLayer', 'UISnapBehavior', 'AVMetadataMachineReadableCodeObject', 'CKDiscoverUserInfosOperation', 'NSTextAttachment', 'NSException', 'UIMenuItem', 'CMMotionActivityManager', 'SCNGeometryElement', 'NCWidgetController', 'CAEmitterLayer', 'MKUserLocation', 'UIImagePickerController', 'CIFeature', 'AVCaptureDeviceInput', 'ALAsset', 'NSURLSessionDownloadTask', 'SCNPhysicsHingeJoint', 'MPMoviePlayerViewController', 'NSMutableOrderedSet', 'SCNMaterialProperty', 'UIFont', 'AVCaptureVideoDataOutput', 'NSCachedURLResponse', 'ALAssetsLibrary', 'NSInvocation', 'UILongPressGestureRecognizer', 'NSTextStorage', 'WKWebViewConfiguration', 'CIFaceFeature', 'MKMapSnapshot', 'GLKEffectPropertyFog', 'AVComposition', 'CKDiscoverAllContactsOperation', 'AVAudioMixInputParameters', 'CAEmitterBehavior', 'PKPassLibrary', 'UIMutableUserNotificationCategory', 'NSLock', 'NEVPNProtocolIPSec', 'ADBannerView', 'UIDocumentPickerExtensionViewController', 'UIActivityIndicatorView', 'AVPlayerMediaSelectionCriteria', 'CALayer', 'UIAccessibilityCustomAction', 'UIBarButtonItem', 'AVAudioSessionRouteDescription', 'CLBeaconRegion', 'HKBloodTypeObject', 'MTLVertexBufferLayoutDescriptorArray', 'CABasicAnimation', 'AVVideoCompositionInstruction', 'AVMutableTimedMetadataGroup', 'EKRecurrenceEnd', 'NSTextContainer', 'TWTweetComposeViewController', 'PKPaymentAuthorizationViewController', 'UIScrollView', 'WKNavigationAction', 'AVPlayerItemMetadataOutput', 'EKRecurrenceDayOfWeek', 'NSNumberFormatter', 'MTLComputePipelineReflection', 'UIScreen', 'CLRegion', 'NSProcessInfo', 'GLKTextureInfo', 'SCNSkinner', 'AVCaptureMetadataOutput', 'SCNAnimationEvent', 'NSTextTab', 'JSManagedValue', 'NSDate', 'UITextChecker', 'WKBackForwardListItem', 'NSData', 'NSParagraphStyle', 'AVMutableMetadataItem', 'EKCalendar', 'HKWorkoutEvent', 'NSMutableURLRequest', 'UIVideoEditorController', 'HMTimerTrigger', 'AVAudioUnitVarispeed', 'UIDynamicAnimator', 'AVCompositionTrackSegment', 'GCGamepadSnapshot', 'MPMediaEntity', 'GLKSkyboxEffect', 'UISwitch', 'EKStructuredLocation', 'UIGestureRecognizer', 'NSProxy', 'GLKBaseEffect', 'UIPushBehavior', 'GKScoreChallenge', 'NSCoder', 'MPMediaPlaylist', 'NSDateComponents', 'WKUserScript', 'EKEvent', 'NSDateFormatter', 'NSAsynchronousFetchResult', 'AVAssetWriterInputPixelBufferAdaptor', 'UIVisualEffect', 'UICollectionViewCell', 'UITextField', 'CLPlacemark', 'MPPlayableContentManager', 'AVCaptureOutput', 'HMCharacteristicWriteAction', 'CKModifySubscriptionsOperation', 'NSPropertyDescription', 'GCGamepad', 'UIMarkupTextPrintFormatter', 'SCNTube', 'NSPersistentStoreCoordinator', 'AVAudioEnvironmentNode', 'GKMatchmaker', 'CIContext', 'NSThread', 'SLComposeSheetConfigurationItem', 'SKPhysicsJointSliding', 'NSPredicate', 'GKVoiceChat', 'SKCropNode', 'AVCaptureAudioPreviewOutput', 'NSStringDrawingContext', 'GKGameCenterViewController', 'UIPrintPaper', 'SCNPhysicsBallSocketJoint', 'UICollectionViewLayoutInvalidationContext', 
'GLKEffectPropertyTransform', 'AVAudioIONode', 'UIDatePicker', 'MKDirections', 'ALAssetsGroup', 'CKRecordZoneNotification', 'SCNScene', 'MPMovieAccessLogEvent', 'CKFetchSubscriptionsOperation', 'CAEmitterCell', 'AVAudioUnitTimeEffect', 'HMCharacteristicMetadata', 'MKPinAnnotationView', 'UIPickerView', 'UIImageView', 'UIUserNotificationCategory', 'SCNPhysicsVehicleWheel', 'HKCategoryType', 'MPMediaQuerySection', 'GKFriendRequestComposeViewController', 'NSError', 'MTLRenderPipelineColorAttachmentDescriptor', 'SCNPhysicsShape', 'UISearchController', 'SCNPhysicsBody', 'CTSubscriberInfo', 'AVPlayerItemAccessLog', 'MPMediaPropertyPredicate', 'CMLogItem', 'NSAutoreleasePool', 'NSSocketPort', 'AVAssetReaderTrackOutput', 'SKNode', 'UIMutableUserNotificationAction', 'SCNProgram', 'AVSpeechSynthesisVoice', 'CMAltimeter', 'AVCaptureAudioChannel', 'GKTurnBasedExchangeReply', 'AVVideoCompositionLayerInstruction', 'AVSpeechSynthesizer', 'GKChallengeEventHandler', 'AVCaptureFileOutput', 'UIControl', 'SCNPhysicsField', 'CKReference', 'LAContext', 'CKRecordID', 'ADInterstitialAd', 'AVAudioSessionDataSourceDescription', 'AVAudioBuffer', 'CIColorKernel', 'GCControllerDirectionPad', 'NSFileManager', 'AVMutableAudioMixInputParameters', 'UIScreenEdgePanGestureRecognizer', 'CAKeyframeAnimation', 'CKQueryNotification', 'PHAdjustmentData', 'EASession', 'AVAssetResourceRenewalRequest', 'UIInputView', 'NSFileWrapper', 'UIResponder', 'NSPointerFunctions', 'UIKeyCommand', 'NSHTTPCookieStorage', 'AVMediaSelectionOption', 'NSRunLoop', 'NSFileAccessIntent', 'CAAnimationGroup', 'MKCircle', 'UIAlertController', 'NSMigrationManager', 'NSDateIntervalFormatter', 'UICollectionViewUpdateItem', 'CKDatabaseOperation', 'PHImageRequestOptions', 'SKReachConstraints', 'CKRecord', 'CAInterAppAudioSwitcherView', 'WKWindowFeatures', 'GKInvite', 'NSMutableData', 'PHAssetCollectionChangeRequest', 'NSMutableParagraphStyle', 'UIDynamicBehavior', 'GLKEffectProperty', 'CKFetchRecordChangesOperation', 'SKShapeNode', 'MPMovieErrorLogEvent', 'MKPolygonView', 'MPContentItem', 'HMAction', 'NSScanner', 'GKAchievementChallenge', 'AVAudioPlayer', 'CKContainer', 'AVVideoComposition', 'NKLibrary', 'NSPersistentStore', 'AVCaptureMovieFileOutput', 'HMRoom', 'GKChallenge', 'UITextRange', 'NSURLProtectionSpace', 'ACAccountStore', 'MPSkipIntervalCommand', 'NSComparisonPredicate', 'HMHome', 'PHVideoRequestOptions', 'NSOutputStream', 'MPSkipIntervalCommandEvent', 'PKAddPassesViewController', 'UITextSelectionRect', 'CTTelephonyNetworkInfo', 'AVTextStyleRule', 'NSFetchedPropertyDescription', 'UIPageViewController', 'CATransformLayer', 'UICollectionViewController', 'AVAudioNode', 'MCNearbyServiceAdvertiser', 'NSObject', 'PHAsset', 'GKLeaderboardViewController', 'CKQueryCursor', 'MPMusicPlayerController', 'MKOverlayPathRenderer', 'CMPedometerData', 'HMService', 'SKFieldNode', 'GKAchievement', 'WKUserContentController', 'AVAssetTrack', 'TWRequest', 'SKLabelNode', 'AVCaptureBracketedStillImageSettings', 'MIDINetworkHost', 'MPMediaPredicate', 'AVFrameRateRange', 'MTLTextureDescriptor', 'MTLVertexBufferLayoutDescriptor', 'MPFeedbackCommandEvent', 'UIUserNotificationAction', 'HKStatisticsQuery', 'SCNParticleSystem', 'NSIndexPath', 'AVVideoCompositionRenderContext', 'CADisplayLink', 'HKObserverQuery', 'UIPopoverPresentationController', 'CKQueryOperation', 'CAEAGLLayer', 'NSMutableString', 'NSMessagePort', 'NSURLQueryItem', 'MTLStructMember', 'AVAudioSessionChannelDescription', 'GLKView', 'UIActivityViewController', 'GKAchievementViewController', 
'GKTurnBasedParticipant', 'NSURLProtocol', 'NSUserDefaults', 'NSCalendar', 'SKKeyframeSequence', 'AVMetadataItemFilter', 'CKModifyRecordZonesOperation', 'WKPreferences', 'NSMethodSignature', 'NSRegularExpression', 'EAGLSharegroup', 'AVPlayerItemVideoOutput', 'PHContentEditingInputRequestOptions', 'GKMatch', 'CIColor', 'UIDictationPhrase']) -COCOA_PROTOCOLS = set(['SKStoreProductViewControllerDelegate', 'AVVideoCompositionInstruction', 'AVAudioSessionDelegate', 'GKMatchDelegate', 'NSFileManagerDelegate', 'UILayoutSupport', 'NSCopying', 'UIPrintInteractionControllerDelegate', 'QLPreviewControllerDataSource', 'SKProductsRequestDelegate', 'NSTextStorageDelegate', 'MCBrowserViewControllerDelegate', 'MTLComputeCommandEncoder', 'SCNSceneExportDelegate', 'UISearchResultsUpdating', 'MFMailComposeViewControllerDelegate', 'MTLBlitCommandEncoder', 'NSDecimalNumberBehaviors', 'PHContentEditingController', 'NSMutableCopying', 'UIActionSheetDelegate', 'UIViewControllerTransitioningDelegate', 'UIAlertViewDelegate', 'AVAudioPlayerDelegate', 'MKReverseGeocoderDelegate', 'NSCoding', 'UITextInputTokenizer', 'GKFriendRequestComposeViewControllerDelegate', 'UIActivityItemSource', 'NSCacheDelegate', 'UIAdaptivePresentationControllerDelegate', 'GKAchievementViewControllerDelegate', 'UIViewControllerTransitionCoordinator', 'EKEventEditViewDelegate', 'NSURLConnectionDelegate', 'UITableViewDelegate', 'GKPeerPickerControllerDelegate', 'UIGuidedAccessRestrictionDelegate', 'AVSpeechSynthesizerDelegate', 'AVAudio3DMixing', 'AVPlayerItemLegibleOutputPushDelegate', 'ADInterstitialAdDelegate', 'HMAccessoryBrowserDelegate', 'AVAssetResourceLoaderDelegate', 'UITabBarControllerDelegate', 'CKRecordValue', 'SKPaymentTransactionObserver', 'AVCaptureAudioDataOutputSampleBufferDelegate', 'UIInputViewAudioFeedback', 'GKChallengeListener', 'SKSceneDelegate', 'UIPickerViewDelegate', 'UIWebViewDelegate', 'UIApplicationDelegate', 'GKInviteEventListener', 'MPMediaPlayback', 'MyClassJavaScriptMethods', 'AVAsynchronousKeyValueLoading', 'QLPreviewItem', 'SCNBoundingVolume', 'NSPortDelegate', 'UIContentContainer', 'SCNNodeRendererDelegate', 'SKRequestDelegate', 'SKPhysicsContactDelegate', 'HMAccessoryDelegate', 'UIPageViewControllerDataSource', 'SCNSceneRendererDelegate', 'SCNPhysicsContactDelegate', 'MKMapViewDelegate', 'AVPlayerItemOutputPushDelegate', 'UICollectionViewDelegate', 'UIImagePickerControllerDelegate', 'MTLRenderCommandEncoder', 'PKPaymentAuthorizationViewControllerDelegate', 'UIToolbarDelegate', 'WKUIDelegate', 'SCNActionable', 'NSURLConnectionDataDelegate', 'MKOverlay', 'CBCentralManagerDelegate', 'JSExport', 'NSTextLayoutOrientationProvider', 'UIPickerViewDataSource', 'PKPushRegistryDelegate', 'UIViewControllerTransitionCoordinatorContext', 'NSLayoutManagerDelegate', 'MTLLibrary', 'NSFetchedResultsControllerDelegate', 'ABPeoplePickerNavigationControllerDelegate', 'MTLResource', 'NSDiscardableContent', 'UITextFieldDelegate', 'MTLBuffer', 'MTLSamplerState', 'GKGameCenterControllerDelegate', 'MPMediaPickerControllerDelegate', 'UISplitViewControllerDelegate', 'UIAppearance', 'UIPickerViewAccessibilityDelegate', 'UITraitEnvironment', 'UIScrollViewAccessibilityDelegate', 'ADBannerViewDelegate', 'MPPlayableContentDataSource', 'MTLComputePipelineState', 'NSURLSessionDelegate', 'MTLCommandBuffer', 'NSXMLParserDelegate', 'UIViewControllerRestoration', 'UISearchBarDelegate', 'UIBarPositioning', 'CBPeripheralDelegate', 'UISearchDisplayDelegate', 'CAAction', 'PKAddPassesViewControllerDelegate', 'MCNearbyServiceAdvertiserDelegate', 
'MTLDepthStencilState', 'GKTurnBasedMatchmakerViewControllerDelegate', 'MPPlayableContentDelegate', 'AVCaptureVideoDataOutputSampleBufferDelegate', 'UIAppearanceContainer', 'UIStateRestoring', 'UITextDocumentProxy', 'MTLDrawable', 'NSURLSessionTaskDelegate', 'NSFilePresenter', 'AVAudioStereoMixing', 'UIViewControllerContextTransitioning', 'UITextInput', 'CBPeripheralManagerDelegate', 'UITextInputDelegate', 'NSFastEnumeration', 'NSURLAuthenticationChallengeSender', 'SCNProgramDelegate', 'AVVideoCompositing', 'SCNAnimatable', 'NSSecureCoding', 'MCAdvertiserAssistantDelegate', 'GKLocalPlayerListener', 'GLKNamedEffect', 'UIPopoverControllerDelegate', 'AVCaptureMetadataOutputObjectsDelegate', 'NSExtensionRequestHandling', 'UITextSelecting', 'UIPrinterPickerControllerDelegate', 'NCWidgetProviding', 'MTLCommandEncoder', 'NSURLProtocolClient', 'MFMessageComposeViewControllerDelegate', 'UIVideoEditorControllerDelegate', 'WKNavigationDelegate', 'GKSavedGameListener', 'UITableViewDataSource', 'MTLFunction', 'EKCalendarChooserDelegate', 'NSUserActivityDelegate', 'UICollisionBehaviorDelegate', 'NSStreamDelegate', 'MCNearbyServiceBrowserDelegate', 'HMHomeDelegate', 'UINavigationControllerDelegate', 'MCSessionDelegate', 'UIDocumentPickerDelegate', 'UIViewControllerInteractiveTransitioning', 'GKTurnBasedEventListener', 'SCNSceneRenderer', 'MTLTexture', 'GLKViewDelegate', 'EAAccessoryDelegate', 'WKScriptMessageHandler', 'PHPhotoLibraryChangeObserver', 'NSKeyedUnarchiverDelegate', 'AVPlayerItemMetadataOutputPushDelegate', 'NSMachPortDelegate', 'SCNShadable', 'UIPopoverBackgroundViewMethods', 'UIDocumentMenuDelegate', 'UIBarPositioningDelegate', 'ABPersonViewControllerDelegate', 'NSNetServiceBrowserDelegate', 'EKEventViewDelegate', 'UIScrollViewDelegate', 'NSURLConnectionDownloadDelegate', 'UIGestureRecognizerDelegate', 'UINavigationBarDelegate', 'AVAudioMixing', 'NSFetchedResultsSectionInfo', 'UIDocumentInteractionControllerDelegate', 'MTLParallelRenderCommandEncoder', 'QLPreviewControllerDelegate', 'UIAccessibilityReadingContent', 'ABUnknownPersonViewControllerDelegate', 'GLKViewControllerDelegate', 'UICollectionViewDelegateFlowLayout', 'UIPopoverPresentationControllerDelegate', 'UIDynamicAnimatorDelegate', 'NSTextAttachmentContainer', 'MKAnnotation', 'UIAccessibilityIdentification', 'UICoordinateSpace', 'ABNewPersonViewControllerDelegate', 'MTLDevice', 'CAMediaTiming', 'AVCaptureFileOutputRecordingDelegate', 'HMHomeManagerDelegate', 'UITextViewDelegate', 'UITabBarDelegate', 'GKLeaderboardViewControllerDelegate', 'UISearchControllerDelegate', 'EAWiFiUnconfiguredAccessoryBrowserDelegate', 'UITextInputTraits', 'MTLRenderPipelineState', 'GKVoiceChatClient', 'UIKeyInput', 'UICollectionViewDataSource', 'SCNTechniqueSupport', 'NSLocking', 'AVCaptureFileOutputDelegate', 'GKChallengeEventHandlerDelegate', 'UIObjectRestoration', 'CIFilterConstructor', 'AVPlayerItemOutputPullDelegate', 'EAGLDrawable', 'AVVideoCompositionValidationHandling', 'UIViewControllerAnimatedTransitioning', 'NSURLSessionDownloadDelegate', 'UIAccelerometerDelegate', 'UIPageViewControllerDelegate', 'MTLCommandQueue', 'UIDataSourceModelAssociation', 'AVAudioRecorderDelegate', 'GKSessionDelegate', 'NSKeyedArchiverDelegate', 'CAMetalDrawable', 'UIDynamicItem', 'CLLocationManagerDelegate', 'NSMetadataQueryDelegate', 'NSNetServiceDelegate', 'GKMatchmakerViewControllerDelegate', 'NSURLSessionDataDelegate']) -COCOA_PRIMITIVES = set(['ROTAHeader', '__CFBundle', 'MortSubtable', 'AudioFilePacketTableInfo', 'CGPDFOperatorTable', 'KerxStateEntry', 
'ExtendedTempoEvent', 'CTParagraphStyleSetting', 'OpaqueMIDIPort', '_GLKMatrix3', '_GLKMatrix2', '_GLKMatrix4', 'ExtendedControlEvent', 'CAFAudioDescription', 'OpaqueCMBlockBuffer', 'CGTextDrawingMode', 'EKErrorCode', 'gss_buffer_desc_struct', 'AudioUnitParameterInfo', '__SCPreferences', '__CTFrame', '__CTLine', 'AudioFile_SMPTE_Time', 'gss_krb5_lucid_context_v1', 'OpaqueJSValue', 'TrakTableEntry', 'AudioFramePacketTranslation', 'CGImageSource', 'OpaqueJSPropertyNameAccumulator', 'JustPCGlyphRepeatAddAction', '__CFBinaryHeap', 'OpaqueMIDIThruConnection', 'opaqueCMBufferQueue', 'OpaqueMusicSequence', 'MortRearrangementSubtable', 'MixerDistanceParams', 'MorxSubtable', 'MIDIObjectPropertyChangeNotification', 'SFNTLookupSegment', 'CGImageMetadataErrors', 'CGPath', 'OpaqueMIDIEndpoint', 'AudioComponentPlugInInterface', 'gss_ctx_id_t_desc_struct', 'sfntFontFeatureSetting', 'OpaqueJSContextGroup', '__SCNetworkConnection', 'AudioUnitParameterValueTranslation', 'CGImageMetadataType', 'CGPattern', 'AudioFileTypeAndFormatID', 'CGContext', 'AUNodeInteraction', 'SFNTLookupTable', 'JustPCDecompositionAction', 'KerxControlPointHeader', 'AudioStreamPacketDescription', 'KernSubtableHeader', '__SecCertificate', 'AUMIDIOutputCallbackStruct', 'MIDIMetaEvent', 'AudioQueueChannelAssignment', 'AnchorPoint', 'JustTable', '__CFNetService', 'CF_BRIDGED_TYPE', 'gss_krb5_lucid_key', 'CGPDFDictionary', 'KerxSubtableHeader', 'CAF_UUID_ChunkHeader', 'gss_krb5_cfx_keydata', 'OpaqueJSClass', 'CGGradient', 'OpaqueMIDISetup', 'JustPostcompTable', '__CTParagraphStyle', 'AudioUnitParameterHistoryInfo', 'OpaqueJSContext', 'CGShading', 'MIDIThruConnectionParams', 'BslnFormat0Part', 'SFNTLookupSingle', '__CFHost', '__SecRandom', '__CTFontDescriptor', '_NSRange', 'sfntDirectory', 'AudioQueueLevelMeterState', 'CAFPositionPeak', 'PropLookupSegment', '__CVOpenGLESTextureCache', 'sfntInstance', '_GLKQuaternion', 'AnkrTable', '__SCNetworkProtocol', 'CAFFileHeader', 'KerxOrderedListHeader', 'CGBlendMode', 'STXEntryOne', 'CAFRegion', 'SFNTLookupTrimmedArrayHeader', 'SCNMatrix4', 'KerxControlPointEntry', 'OpaqueMusicTrack', '_GLKVector4', 'gss_OID_set_desc_struct', 'OpaqueMusicPlayer', '_CFHTTPAuthentication', 'CGAffineTransform', 'CAFMarkerChunk', 'AUHostIdentifier', 'ROTAGlyphEntry', 'BslnTable', 'gss_krb5_lucid_context_version', '_GLKMatrixStack', 'CGImage', 'KernStateEntry', 'SFNTLookupSingleHeader', 'MortLigatureSubtable', 'CAFUMIDChunk', 'SMPTETime', 'CAFDataChunk', 'CGPDFStream', 'AudioFileRegionList', 'STEntryTwo', 'SFNTLookupBinarySearchHeader', 'OpbdTable', '__CTGlyphInfo', 'BslnFormat2Part', 'KerxIndexArrayHeader', 'TrakTable', 'KerxKerningPair', '__CFBitVector', 'KernVersion0SubtableHeader', 'OpaqueAudioComponentInstance', 'AudioChannelLayout', '__CFUUID', 'MIDISysexSendRequest', '__CFNumberFormatter', 'CGImageSourceStatus', 'AudioFileMarkerList', 'AUSamplerBankPresetData', 'CGDataProvider', 'AudioFormatInfo', '__SecIdentity', 'sfntCMapExtendedSubHeader', 'MIDIChannelMessage', 'KernOffsetTable', 'CGColorSpaceModel', 'MFMailComposeErrorCode', 'CGFunction', '__SecTrust', 'AVAudio3DAngularOrientation', 'CGFontPostScriptFormat', 'KernStateHeader', 'AudioUnitCocoaViewInfo', 'CGDataConsumer', 'OpaqueMIDIDevice', 'KernVersion0Header', 'AnchorPointTable', 'CGImageDestination', 'CAFInstrumentChunk', 'AudioUnitMeterClipping', 'MorxChain', '__CTFontCollection', 'STEntryOne', 'STXEntryTwo', 'ExtendedNoteOnEvent', 'CGColorRenderingIntent', 'KerxSimpleArrayHeader', 'MorxTable', '_GLKVector3', '_GLKVector2', 'MortTable', 'CGPDFBox', 
'AudioUnitParameterValueFromString', '__CFSocket', 'ALCdevice_struct', 'MIDINoteMessage', 'sfntFeatureHeader', 'CGRect', '__SCNetworkInterface', '__CFTree', 'MusicEventUserData', 'TrakTableData', 'GCQuaternion', 'MortContextualSubtable', '__CTRun', 'AudioUnitFrequencyResponseBin', 'MortChain', 'MorxInsertionSubtable', 'CGImageMetadata', 'gss_auth_identity', 'AudioUnitMIDIControlMapping', 'CAFChunkHeader', 'CGImagePropertyOrientation', 'CGPDFScanner', 'OpaqueMusicEventIterator', 'sfntDescriptorHeader', 'AudioUnitNodeConnection', 'OpaqueMIDIDeviceList', 'ExtendedAudioFormatInfo', 'BslnFormat1Part', 'sfntFontDescriptor', 'KernSimpleArrayHeader', '__CFRunLoopObserver', 'CGPatternTiling', 'MIDINotification', 'MorxLigatureSubtable', 'MessageComposeResult', 'MIDIThruConnectionEndpoint', 'MusicDeviceStdNoteParams', 'opaqueCMSimpleQueue', 'ALCcontext_struct', 'OpaqueAudioQueue', 'PropLookupSingle', 'CGInterpolationQuality', 'CGColor', 'AudioOutputUnitStartAtTimeParams', 'gss_name_t_desc_struct', 'CGFunctionCallbacks', 'CAFPacketTableHeader', 'AudioChannelDescription', 'sfntFeatureName', 'MorxContextualSubtable', 'CVSMPTETime', 'AudioValueRange', 'CGTextEncoding', 'AudioStreamBasicDescription', 'AUNodeRenderCallback', 'AudioPanningInfo', 'KerxOrderedListEntry', '__CFAllocator', 'OpaqueJSPropertyNameArray', '__SCDynamicStore', 'OpaqueMIDIEntity', '__CTRubyAnnotation', 'SCNVector4', 'CFHostClientContext', 'CFNetServiceClientContext', 'AudioUnitPresetMAS_SettingData', 'opaqueCMBufferQueueTriggerToken', 'AudioUnitProperty', 'CAFRegionChunk', 'CGPDFString', '__GLsync', '__CFStringTokenizer', 'JustWidthDeltaEntry', 'sfntVariationAxis', '__CFNetDiagnostic', 'CAFOverviewSample', 'sfntCMapEncoding', 'CGVector', '__SCNetworkService', 'opaqueCMSampleBuffer', 'AUHostVersionIdentifier', 'AudioBalanceFade', 'sfntFontRunFeature', 'KerxCoordinateAction', 'sfntCMapSubHeader', 'CVPlanarPixelBufferInfo', 'AUNumVersion', 'AUSamplerInstrumentData', 'AUPreset', '__CTRunDelegate', 'OpaqueAudioQueueProcessingTap', 'KerxTableHeader', '_NSZone', 'OpaqueExtAudioFile', '__CFRunLoopSource', '__CVMetalTextureCache', 'KerxAnchorPointAction', 'OpaqueJSString', 'AudioQueueParameterEvent', '__CFHTTPMessage', 'OpaqueCMClock', 'ScheduledAudioFileRegion', 'STEntryZero', 'AVAudio3DPoint', 'gss_channel_bindings_struct', 'sfntVariationHeader', 'AUChannelInfo', 'UIOffset', 'GLKEffectPropertyPrv', 'KerxStateHeader', 'CGLineJoin', 'CGPDFDocument', '__CFBag', 'KernOrderedListHeader', '__SCNetworkSet', '__SecKey', 'MIDIObjectAddRemoveNotification', 'AudioUnitParameter', 'JustPCActionSubrecord', 'AudioComponentDescription', 'AudioUnitParameterValueName', 'AudioUnitParameterEvent', 'KerxControlPointAction', 'AudioTimeStamp', 'KernKerningPair', 'gss_buffer_set_desc_struct', 'MortFeatureEntry', 'FontVariation', 'CAFStringID', 'LcarCaretClassEntry', 'AudioUnitParameterStringFromValue', 'ACErrorCode', 'ALMXGlyphEntry', 'LtagTable', '__CTTypesetter', 'AuthorizationOpaqueRef', 'UIEdgeInsets', 'CGPathElement', 'CAFMarker', 'KernTableHeader', 'NoteParamsControlValue', 'SSLContext', 'gss_cred_id_t_desc_struct', 'AudioUnitParameterNameInfo', 'CGDataConsumerCallbacks', 'ALMXHeader', 'CGLineCap', 'MIDIControlTransform', 'CGPDFArray', '__SecPolicy', 'AudioConverterPrimeInfo', '__CTTextTab', '__CFNetServiceMonitor', 'AUInputSamplesInOutputCallbackStruct', '__CTFramesetter', 'CGPDFDataFormat', 'STHeader', 'CVPlanarPixelBufferInfo_YCbCrPlanar', 'MIDIValueMap', 'JustDirectionTable', '__SCBondStatus', 'SFNTLookupSegmentHeader', 'OpaqueCMMemoryPool', 
'CGPathDrawingMode', 'CGFont', '__SCNetworkReachability', 'AudioClassDescription', 'CGPoint', 'AVAudio3DVectorOrientation', 'CAFStrings', '__CFNetServiceBrowser', 'opaqueMTAudioProcessingTap', 'sfntNameRecord', 'CGPDFPage', 'CGLayer', 'ComponentInstanceRecord', 'CAFInfoStrings', 'HostCallbackInfo', 'MusicDeviceNoteParams', 'OpaqueVTCompressionSession', 'KernIndexArrayHeader', 'CVPlanarPixelBufferInfo_YCbCrBiPlanar', 'MusicTrackLoopInfo', 'opaqueCMFormatDescription', 'STClassTable', 'sfntDirectoryEntry', 'OpaqueCMTimebase', 'CGDataProviderDirectCallbacks', 'MIDIPacketList', 'CAFOverviewChunk', 'MIDIPacket', 'ScheduledAudioSlice', 'CGDataProviderSequentialCallbacks', 'AudioBuffer', 'MorxRearrangementSubtable', 'CGPatternCallbacks', 'AUDistanceAttenuationData', 'MIDIIOErrorNotification', 'CGPDFContentStream', 'IUnknownVTbl', 'MIDITransform', 'MortInsertionSubtable', 'CABarBeatTime', 'AudioBufferList', '__CVBuffer', 'AURenderCallbackStruct', 'STXEntryZero', 'JustPCDuctilityAction', 'OpaqueAudioQueueTimeline', 'VTDecompressionOutputCallbackRecord', 'OpaqueMIDIClient', '__CFPlugInInstance', 'AudioQueueBuffer', '__CFFileDescriptor', 'AudioUnitConnection', '_GKTurnBasedExchangeStatus', 'LcarCaretTable', 'CVPlanarComponentInfo', 'JustWidthDeltaGroup', 'OpaqueAudioComponent', 'ParameterEvent', '__CVPixelBufferPool', '__CTFont', 'CGColorSpace', 'CGSize', 'AUDependentParameter', 'MIDIDriverInterface', 'gss_krb5_rfc1964_keydata', '__CFDateFormatter', 'LtagStringRange', 'OpaqueVTDecompressionSession', 'gss_iov_buffer_desc_struct', 'AUPresetEvent', 'PropTable', 'KernOrderedListEntry', 'CF_BRIDGED_MUTABLE_TYPE', 'gss_OID_desc_struct', 'AudioUnitPresetMAS_Settings', 'AudioFileMarker', 'JustPCConditionalAddAction', 'BslnFormat3Part', '__CFNotificationCenter', 'MortSwashSubtable', 'AUParameterMIDIMapping', 'SCNVector3', 'OpaqueAudioConverter', 'MIDIRawData', 'sfntNameHeader', '__CFRunLoop', 'MFMailComposeResult', 'CATransform3D', 'OpbdSideValues', 'CAF_SMPTE_Time', '__SecAccessControl', 'JustPCAction', 'OpaqueVTFrameSilo', 'OpaqueVTMultiPassStorage', 'CGPathElementType', 'AudioFormatListItem', 'AudioUnitExternalBuffer', 'AudioFileRegion', 'AudioValueTranslation', 'CGImageMetadataTag', 'CAFPeakChunk', 'AudioBytePacketTranslation', 'sfntCMapHeader', '__CFURLEnumerator', 'STXHeader', 'CGPDFObjectType', 'SFNTLookupArrayHeader']) +COCOA_INTERFACES = {'UITableViewCell', 'HKCorrelationQuery', 'NSURLSessionDataTask', 'PHFetchOptions', 'NSLinguisticTagger', 'NSStream', 'AVAudioUnitDelay', 'GCMotion', 'SKPhysicsWorld', 'NSString', 'CMAttitude', 'AVAudioEnvironmentDistanceAttenuationParameters', 'HKStatisticsCollection', 'SCNPlane', 'CBPeer', 'JSContext', 'SCNTransaction', 'SCNTorus', 'AVAudioUnitEffect', 'UICollectionReusableView', 'MTLSamplerDescriptor', 'AVAssetReaderSampleReferenceOutput', 'AVMutableCompositionTrack', 'GKLeaderboard', 'NSFetchedResultsController', 'SKRange', 'MKTileOverlayRenderer', 'MIDINetworkSession', 'UIVisualEffectView', 'CIWarpKernel', 'PKObject', 'MKRoute', 'MPVolumeView', 'UIPrintInfo', 'SCNText', 'ADClient', 'PKPayment', 'AVMutableAudioMix', 'GLKEffectPropertyLight', 'WKScriptMessage', 'AVMIDIPlayer', 'PHCollectionListChangeRequest', 'UICollectionViewLayout', 'NSMutableCharacterSet', 'SKPaymentTransaction', 'NEOnDemandRuleConnect', 'NSShadow', 'SCNView', 'NSURLSessionConfiguration', 'MTLVertexAttributeDescriptor', 'CBCharacteristic', 'HKQuantityType', 'CKLocationSortDescriptor', 'NEVPNIKEv2SecurityAssociationParameters', 'CMStepCounter', 'NSNetService', 'AVAssetWriterInputMetadataAdaptor', 
'UICollectionView', 'UIViewPrintFormatter', 'SCNLevelOfDetail', 'CAShapeLayer', 'MCPeerID', 'MPRatingCommand', 'WKNavigation', 'NSDictionary', 'NSFileVersion', 'CMGyroData', 'AVAudioUnitDistortion', 'CKFetchRecordsOperation', 'SKPhysicsJointSpring', 'SCNHitTestResult', 'AVAudioTime', 'CIFilter', 'UIView', 'SCNConstraint', 'CAPropertyAnimation', 'MKMapItem', 'MPRemoteCommandCenter', 'PKPaymentSummaryItem', 'UICollectionViewFlowLayoutInvalidationContext', 'UIInputViewController', 'PKPass', 'SCNPhysicsBehavior', 'MTLRenderPassColorAttachmentDescriptor', 'MKPolygonRenderer', 'CKNotification', 'JSValue', 'PHCollectionList', 'CLGeocoder', 'NSByteCountFormatter', 'AVCaptureScreenInput', 'MPFeedbackCommand', 'CAAnimation', 'MKOverlayPathView', 'UIActionSheet', 'UIMotionEffectGroup', 'NSLengthFormatter', 'UIBarItem', 'SKProduct', 'AVAssetExportSession', 'NSKeyedUnarchiver', 'NSMutableSet', 'SCNPyramid', 'PHAssetCollection', 'MKMapView', 'HMHomeManager', 'CATransition', 'MTLCompileOptions', 'UIVibrancyEffect', 'CLCircularRegion', 'MKTileOverlay', 'SCNShape', 'ACAccountCredential', 'SKPhysicsJointLimit', 'MKMapSnapshotter', 'AVMediaSelectionGroup', 'NSIndexSet', 'CBPeripheralManager', 'CKRecordZone', 'AVAudioRecorder', 'NSURL', 'CBCentral', 'NSNumber', 'AVAudioOutputNode', 'MTLVertexAttributeDescriptorArray', 'MKETAResponse', 'SKTransition', 'SSReadingList', 'HKSourceQuery', 'UITableViewRowAction', 'UITableView', 'SCNParticlePropertyController', 'AVCaptureStillImageOutput', 'GCController', 'AVAudioPlayerNode', 'AVAudioSessionPortDescription', 'NSHTTPURLResponse', 'NEOnDemandRuleEvaluateConnection', 'SKEffectNode', 'HKQuantity', 'GCControllerElement', 'AVPlayerItemAccessLogEvent', 'SCNBox', 'NSExtensionContext', 'MKOverlayRenderer', 'SCNPhysicsVehicle', 'NSDecimalNumber', 'EKReminder', 'MKPolylineView', 'CKQuery', 'AVAudioMixerNode', 'GKAchievementDescription', 'EKParticipant', 'NSBlockOperation', 'UIActivityItemProvider', 'CLLocation', 'NSBatchUpdateRequest', 'PHContentEditingOutput', 'PHObjectChangeDetails', 'HKWorkoutType', 'MPMoviePlayerController', 'AVAudioFormat', 'HMTrigger', 'MTLRenderPassDepthAttachmentDescriptor', 'SCNRenderer', 'GKScore', 'UISplitViewController', 'HKSource', 'NSURLConnection', 'ABUnknownPersonViewController', 'SCNTechnique', 'UIMenuController', 'NSEvent', 'SKTextureAtlas', 'NSKeyedArchiver', 'GKLeaderboardSet', 'NSSimpleCString', 'AVAudioPCMBuffer', 'CBATTRequest', 'GKMatchRequest', 'AVMetadataObject', 'SKProductsRequest', 'UIAlertView', 'NSIncrementalStore', 'MFMailComposeViewController', 'SCNFloor', 'NSSortDescriptor', 'CKFetchNotificationChangesOperation', 'MPMovieAccessLog', 'NSManagedObjectContext', 'AVAudioUnitGenerator', 'WKBackForwardList', 'SKMutableTexture', 'AVCaptureAudioDataOutput', 'ACAccount', 'AVMetadataItem', 'MPRatingCommandEvent', 'AVCaptureDeviceInputSource', 'CLLocationManager', 'MPRemoteCommand', 'AVCaptureSession', 'UIStepper', 'UIRefreshControl', 'NEEvaluateConnectionRule', 'CKModifyRecordsOperation', 'UICollectionViewTransitionLayout', 'CBCentralManager', 'NSPurgeableData', 'PKShippingMethod', 'SLComposeViewController', 'NSHashTable', 'MKUserTrackingBarButtonItem', 'UILexiconEntry', 'CMMotionActivity', 'SKAction', 'SKShader', 'AVPlayerItemOutput', 'MTLRenderPassAttachmentDescriptor', 'UIDocumentInteractionController', 'UIDynamicItemBehavior', 'NSMutableDictionary', 'UILabel', 'AVCaptureInputPort', 'NSExpression', 'CAInterAppAudioTransportView', 'SKMutablePayment', 'UIImage', 'PHCachingImageManager', 'SCNTransformConstraint', 'HKCorrelationType', 
'UIColor', 'SCNGeometrySource', 'AVCaptureAutoExposureBracketedStillImageSettings', 'UIPopoverBackgroundView', 'UIToolbar', 'NSNotificationCenter', 'UICollectionViewLayoutAttributes', 'AVAssetReaderOutputMetadataAdaptor', 'NSEntityMigrationPolicy', 'HMUser', 'NSLocale', 'NSURLSession', 'SCNCamera', 'NSTimeZone', 'UIManagedDocument', 'AVMutableVideoCompositionLayerInstruction', 'AVAssetTrackGroup', 'NSInvocationOperation', 'ALAssetRepresentation', 'AVQueuePlayer', 'HMServiceGroup', 'UIPasteboard', 'PHContentEditingInput', 'NSLayoutManager', 'EKCalendarChooser', 'EKObject', 'CATiledLayer', 'GLKReflectionMapEffect', 'NSManagedObjectID', 'NSEnergyFormatter', 'SLRequest', 'HMCharacteristic', 'AVPlayerLayer', 'MTLRenderPassDescriptor', 'SKPayment', 'NSPointerArray', 'AVAudioMix', 'SCNLight', 'MCAdvertiserAssistant', 'MKMapSnapshotOptions', 'HKCategorySample', 'AVAudioEnvironmentReverbParameters', 'SCNMorpher', 'AVTimedMetadataGroup', 'CBMutableCharacteristic', 'NSFetchRequest', 'UIDevice', 'NSManagedObject', 'NKAssetDownload', 'AVOutputSettingsAssistant', 'SKPhysicsJointPin', 'UITabBar', 'UITextInputMode', 'NSFetchRequestExpression', 'HMActionSet', 'CTSubscriber', 'PHAssetChangeRequest', 'NSPersistentStoreRequest', 'UITabBarController', 'HKQuantitySample', 'AVPlayerItem', 'AVSynchronizedLayer', 'MKDirectionsRequest', 'NSMetadataItem', 'UIPresentationController', 'UINavigationItem', 'PHFetchResultChangeDetails', 'PHImageManager', 'AVCaptureManualExposureBracketedStillImageSettings', 'UIStoryboardPopoverSegue', 'SCNLookAtConstraint', 'UIGravityBehavior', 'UIWindow', 'CBMutableDescriptor', 'NEOnDemandRuleDisconnect', 'UIBezierPath', 'UINavigationController', 'ABPeoplePickerNavigationController', 'EKSource', 'AVAssetWriterInput', 'AVPlayerItemTrack', 'GLKEffectPropertyTexture', 'NSHTTPCookie', 'NSURLResponse', 'SKPaymentQueue', 'NSAssertionHandler', 'MKReverseGeocoder', 'GCControllerAxisInput', 'NSArray', 'NSOrthography', 'NSURLSessionUploadTask', 'NSCharacterSet', 'AVMutableVideoCompositionInstruction', 'AVAssetReaderOutput', 'EAGLContext', 'WKFrameInfo', 'CMPedometer', 'MyClass', 'CKModifyBadgeOperation', 'AVCaptureAudioFileOutput', 'SKEmitterNode', 'NSMachPort', 'AVVideoCompositionCoreAnimationTool', 'PHCollection', 'SCNPhysicsWorld', 'NSURLRequest', 'CMAccelerometerData', 'NSNetServiceBrowser', 'CLFloor', 'AVAsynchronousVideoCompositionRequest', 'SCNGeometry', 'SCNIKConstraint', 'CIKernel', 'CAGradientLayer', 'HKCharacteristicType', 'NSFormatter', 'SCNAction', 'CATransaction', 'CBUUID', 'UIStoryboard', 'MPMediaLibrary', 'UITapGestureRecognizer', 'MPMediaItemArtwork', 'NSURLSessionTask', 'AVAudioUnit', 'MCBrowserViewController', 'UIFontDescriptor', 'NSRelationshipDescription', 'HKSample', 'WKWebView', 'NSMutableAttributedString', 'NSPersistentStoreAsynchronousResult', 'MPNowPlayingInfoCenter', 'MKLocalSearch', 'EAAccessory', 'HKCorrelation', 'CATextLayer', 'NSNotificationQueue', 'UINib', 'GLKTextureLoader', 'HKObjectType', 'NSValue', 'NSMutableIndexSet', 'SKPhysicsContact', 'NSProgress', 'AVPlayerViewController', 'CAScrollLayer', 'GKSavedGame', 'NSTextCheckingResult', 'PHObjectPlaceholder', 'SKConstraint', 'EKEventEditViewController', 'NSEntityDescription', 'NSURLCredentialStorage', 'UIApplication', 'SKDownload', 'SCNNode', 'MKLocalSearchRequest', 'SKScene', 'UISearchDisplayController', 'NEOnDemandRule', 'MTLRenderPassStencilAttachmentDescriptor', 'CAReplicatorLayer', 'UIPrintPageRenderer', 'EKCalendarItem', 'NSUUID', 'EAAccessoryManager', 'NEOnDemandRuleIgnore', 'SKRegion', 
'AVAssetResourceLoader', 'EAWiFiUnconfiguredAccessoryBrowser', 'NSUserActivity', 'CTCall', 'UIPrinterPickerController', 'CIVector', 'UINavigationBar', 'UIPanGestureRecognizer', 'MPMediaQuery', 'ABNewPersonViewController', 'CKRecordZoneID', 'HKAnchoredObjectQuery', 'CKFetchRecordZonesOperation', 'UIStoryboardSegue', 'ACAccountType', 'GKSession', 'SKVideoNode', 'PHChange', 'SKReceiptRefreshRequest', 'GCExtendedGamepadSnapshot', 'MPSeekCommandEvent', 'GCExtendedGamepad', 'CAValueFunction', 'SCNCylinder', 'NSNotification', 'NSBatchUpdateResult', 'PKPushCredentials', 'SCNPhysicsSliderJoint', 'AVCaptureDeviceFormat', 'AVPlayerItemErrorLog', 'NSMapTable', 'NSSet', 'CMMotionManager', 'GKVoiceChatService', 'UIPageControl', 'UILexicon', 'MTLArrayType', 'AVAudioUnitReverb', 'MKGeodesicPolyline', 'AVMutableComposition', 'NSLayoutConstraint', 'UIPrinter', 'NSOrderedSet', 'CBAttribute', 'PKPushPayload', 'NSIncrementalStoreNode', 'EKEventStore', 'MPRemoteCommandEvent', 'UISlider', 'UIBlurEffect', 'CKAsset', 'AVCaptureInput', 'AVAudioEngine', 'MTLVertexDescriptor', 'SKPhysicsBody', 'NSOperation', 'PKPaymentPass', 'UIImageAsset', 'MKMapCamera', 'SKProductsResponse', 'GLKEffectPropertyMaterial', 'AVCaptureDevice', 'CTCallCenter', 'CABTMIDILocalPeripheralViewController', 'NEVPNManager', 'HKQuery', 'SCNPhysicsContact', 'CBMutableService', 'AVSampleBufferDisplayLayer', 'SCNSceneSource', 'SKLightNode', 'CKDiscoveredUserInfo', 'NSMutableArray', 'MTLDepthStencilDescriptor', 'MTLArgument', 'NSMassFormatter', 'CIRectangleFeature', 'PKPushRegistry', 'NEVPNConnection', 'MCNearbyServiceBrowser', 'NSOperationQueue', 'MKPolylineRenderer', 'HKWorkout', 'NSValueTransformer', 'UICollectionViewFlowLayout', 'MPChangePlaybackRateCommandEvent', 'NSEntityMapping', 'SKTexture', 'NSMergePolicy', 'UITextInputStringTokenizer', 'NSRecursiveLock', 'AVAsset', 'NSUndoManager', 'AVAudioUnitSampler', 'NSItemProvider', 'SKUniform', 'MPMediaPickerController', 'CKOperation', 'MTLRenderPipelineDescriptor', 'EAWiFiUnconfiguredAccessory', 'NSFileCoordinator', 'SKRequest', 'NSFileHandle', 'NSConditionLock', 'UISegmentedControl', 'NSManagedObjectModel', 'UITabBarItem', 'SCNCone', 'MPMediaItem', 'SCNMaterial', 'EKRecurrenceRule', 'UIEvent', 'UITouch', 'UIPrintInteractionController', 'CMDeviceMotion', 'NEVPNProtocol', 'NSCompoundPredicate', 'HKHealthStore', 'MKMultiPoint', 'HKSampleType', 'UIPrintFormatter', 'AVAudioUnitEQFilterParameters', 'SKView', 'NSConstantString', 'UIPopoverController', 'CKDatabase', 'AVMetadataFaceObject', 'UIAccelerometer', 'EKEventViewController', 'CMAltitudeData', 'MTLStencilDescriptor', 'UISwipeGestureRecognizer', 'NSPort', 'MKCircleRenderer', 'AVCompositionTrack', 'NSAsynchronousFetchRequest', 'NSUbiquitousKeyValueStore', 'NSMetadataQueryResultGroup', 'AVAssetResourceLoadingDataRequest', 'UITableViewHeaderFooterView', 'CKNotificationID', 'AVAudioSession', 'HKUnit', 'NSNull', 'NSPersistentStoreResult', 'MKCircleView', 'AVAudioChannelLayout', 'NEVPNProtocolIKEv2', 'WKProcessPool', 'UIAttachmentBehavior', 'CLBeacon', 'NSInputStream', 'NSURLCache', 'GKPlayer', 'NSMappingModel', 'CIQRCodeFeature', 'AVMutableVideoComposition', 'PHFetchResult', 'NSAttributeDescription', 'AVPlayer', 'MKAnnotationView', 'PKPaymentRequest', 'NSTimer', 'CBDescriptor', 'MKOverlayView', 'AVAudioUnitTimePitch', 'NSSaveChangesRequest', 'UIReferenceLibraryViewController', 'SKPhysicsJointFixed', 'UILocalizedIndexedCollation', 'UIInterpolatingMotionEffect', 'UIDocumentPickerViewController', 'AVAssetWriter', 'NSBundle', 'SKStoreProductViewController', 
'GLKViewController', 'NSMetadataQueryAttributeValueTuple', 'GKTurnBasedMatch', 'AVAudioFile', 'UIActivity', 'NSPipe', 'MKShape', 'NSMergeConflict', 'CIImage', 'HKObject', 'UIRotationGestureRecognizer', 'AVPlayerItemLegibleOutput', 'AVAssetImageGenerator', 'GCControllerButtonInput', 'CKMarkNotificationsReadOperation', 'CKSubscription', 'MPTimedMetadata', 'NKIssue', 'UIScreenMode', 'HMAccessoryBrowser', 'GKTurnBasedEventHandler', 'UIWebView', 'MKPolyline', 'JSVirtualMachine', 'AVAssetReader', 'NSAttributedString', 'GKMatchmakerViewController', 'NSCountedSet', 'UIButton', 'WKNavigationResponse', 'GKLocalPlayer', 'MPMovieErrorLog', 'AVSpeechUtterance', 'HKStatistics', 'UILocalNotification', 'HKBiologicalSexObject', 'AVURLAsset', 'CBPeripheral', 'NSDateComponentsFormatter', 'SKSpriteNode', 'UIAccessibilityElement', 'AVAssetWriterInputGroup', 'HMZone', 'AVAssetReaderAudioMixOutput', 'NSEnumerator', 'UIDocument', 'MKLocalSearchResponse', 'UISimpleTextPrintFormatter', 'PHPhotoLibrary', 'CBService', 'UIDocumentMenuViewController', 'MCSession', 'QLPreviewController', 'CAMediaTimingFunction', 'UITextPosition', 'ASIdentifierManager', 'AVAssetResourceLoadingRequest', 'SLComposeServiceViewController', 'UIPinchGestureRecognizer', 'PHObject', 'NSExtensionItem', 'HKSampleQuery', 'MTLRenderPipelineColorAttachmentDescriptorArray', 'MKRouteStep', 'SCNCapsule', 'NSMetadataQuery', 'AVAssetResourceLoadingContentInformationRequest', 'UITraitCollection', 'CTCarrier', 'NSFileSecurity', 'UIAcceleration', 'UIMotionEffect', 'MTLRenderPipelineReflection', 'CLHeading', 'CLVisit', 'MKDirectionsResponse', 'HMAccessory', 'MTLStructType', 'UITextView', 'CMMagnetometerData', 'UICollisionBehavior', 'UIProgressView', 'CKServerChangeToken', 'UISearchBar', 'MKPlacemark', 'AVCaptureConnection', 'NSPropertyMapping', 'ALAssetsFilter', 'SK3DNode', 'AVPlayerItemErrorLogEvent', 'NSJSONSerialization', 'AVAssetReaderVideoCompositionOutput', 'ABPersonViewController', 'CIDetector', 'GKTurnBasedMatchmakerViewController', 'MPMediaItemCollection', 'SCNSphere', 'NSCondition', 'NSURLCredential', 'MIDINetworkConnection', 'NSFileProviderExtension', 'NSDecimalNumberHandler', 'NSAtomicStoreCacheNode', 'NSAtomicStore', 'EKAlarm', 'CKNotificationInfo', 'AVAudioUnitEQ', 'UIPercentDrivenInteractiveTransition', 'MKPolygon', 'AVAssetTrackSegment', 'MTLVertexAttribute', 'NSExpressionDescription', 'HKStatisticsCollectionQuery', 'NSURLAuthenticationChallenge', 'NSDirectoryEnumerator', 'MKDistanceFormatter', 'UIAlertAction', 'NSPropertyListSerialization', 'GKPeerPickerController', 'UIUserNotificationSettings', 'UITableViewController', 'GKNotificationBanner', 'MKPointAnnotation', 'MTLRenderPassColorAttachmentDescriptorArray', 'NSCache', 'SKPhysicsJoint', 'NSXMLParser', 'UIViewController', 'PKPaymentToken', 'MFMessageComposeViewController', 'AVAudioInputNode', 'NSDataDetector', 'CABTMIDICentralViewController', 'AVAudioUnitMIDIInstrument', 'AVCaptureVideoPreviewLayer', 'AVAssetWriterInputPassDescription', 'MPChangePlaybackRateCommand', 'NSURLComponents', 'CAMetalLayer', 'UISnapBehavior', 'AVMetadataMachineReadableCodeObject', 'CKDiscoverUserInfosOperation', 'NSTextAttachment', 'NSException', 'UIMenuItem', 'CMMotionActivityManager', 'SCNGeometryElement', 'NCWidgetController', 'CAEmitterLayer', 'MKUserLocation', 'UIImagePickerController', 'CIFeature', 'AVCaptureDeviceInput', 'ALAsset', 'NSURLSessionDownloadTask', 'SCNPhysicsHingeJoint', 'MPMoviePlayerViewController', 'NSMutableOrderedSet', 'SCNMaterialProperty', 'UIFont', 'AVCaptureVideoDataOutput', 
'NSCachedURLResponse', 'ALAssetsLibrary', 'NSInvocation', 'UILongPressGestureRecognizer', 'NSTextStorage', 'WKWebViewConfiguration', 'CIFaceFeature', 'MKMapSnapshot', 'GLKEffectPropertyFog', 'AVComposition', 'CKDiscoverAllContactsOperation', 'AVAudioMixInputParameters', 'CAEmitterBehavior', 'PKPassLibrary', 'UIMutableUserNotificationCategory', 'NSLock', 'NEVPNProtocolIPSec', 'ADBannerView', 'UIDocumentPickerExtensionViewController', 'UIActivityIndicatorView', 'AVPlayerMediaSelectionCriteria', 'CALayer', 'UIAccessibilityCustomAction', 'UIBarButtonItem', 'AVAudioSessionRouteDescription', 'CLBeaconRegion', 'HKBloodTypeObject', 'MTLVertexBufferLayoutDescriptorArray', 'CABasicAnimation', 'AVVideoCompositionInstruction', 'AVMutableTimedMetadataGroup', 'EKRecurrenceEnd', 'NSTextContainer', 'TWTweetComposeViewController', 'PKPaymentAuthorizationViewController', 'UIScrollView', 'WKNavigationAction', 'AVPlayerItemMetadataOutput', 'EKRecurrenceDayOfWeek', 'NSNumberFormatter', 'MTLComputePipelineReflection', 'UIScreen', 'CLRegion', 'NSProcessInfo', 'GLKTextureInfo', 'SCNSkinner', 'AVCaptureMetadataOutput', 'SCNAnimationEvent', 'NSTextTab', 'JSManagedValue', 'NSDate', 'UITextChecker', 'WKBackForwardListItem', 'NSData', 'NSParagraphStyle', 'AVMutableMetadataItem', 'EKCalendar', 'HKWorkoutEvent', 'NSMutableURLRequest', 'UIVideoEditorController', 'HMTimerTrigger', 'AVAudioUnitVarispeed', 'UIDynamicAnimator', 'AVCompositionTrackSegment', 'GCGamepadSnapshot', 'MPMediaEntity', 'GLKSkyboxEffect', 'UISwitch', 'EKStructuredLocation', 'UIGestureRecognizer', 'NSProxy', 'GLKBaseEffect', 'UIPushBehavior', 'GKScoreChallenge', 'NSCoder', 'MPMediaPlaylist', 'NSDateComponents', 'WKUserScript', 'EKEvent', 'NSDateFormatter', 'NSAsynchronousFetchResult', 'AVAssetWriterInputPixelBufferAdaptor', 'UIVisualEffect', 'UICollectionViewCell', 'UITextField', 'CLPlacemark', 'MPPlayableContentManager', 'AVCaptureOutput', 'HMCharacteristicWriteAction', 'CKModifySubscriptionsOperation', 'NSPropertyDescription', 'GCGamepad', 'UIMarkupTextPrintFormatter', 'SCNTube', 'NSPersistentStoreCoordinator', 'AVAudioEnvironmentNode', 'GKMatchmaker', 'CIContext', 'NSThread', 'SLComposeSheetConfigurationItem', 'SKPhysicsJointSliding', 'NSPredicate', 'GKVoiceChat', 'SKCropNode', 'AVCaptureAudioPreviewOutput', 'NSStringDrawingContext', 'GKGameCenterViewController', 'UIPrintPaper', 'SCNPhysicsBallSocketJoint', 'UICollectionViewLayoutInvalidationContext', 'GLKEffectPropertyTransform', 'AVAudioIONode', 'UIDatePicker', 'MKDirections', 'ALAssetsGroup', 'CKRecordZoneNotification', 'SCNScene', 'MPMovieAccessLogEvent', 'CKFetchSubscriptionsOperation', 'CAEmitterCell', 'AVAudioUnitTimeEffect', 'HMCharacteristicMetadata', 'MKPinAnnotationView', 'UIPickerView', 'UIImageView', 'UIUserNotificationCategory', 'SCNPhysicsVehicleWheel', 'HKCategoryType', 'MPMediaQuerySection', 'GKFriendRequestComposeViewController', 'NSError', 'MTLRenderPipelineColorAttachmentDescriptor', 'SCNPhysicsShape', 'UISearchController', 'SCNPhysicsBody', 'CTSubscriberInfo', 'AVPlayerItemAccessLog', 'MPMediaPropertyPredicate', 'CMLogItem', 'NSAutoreleasePool', 'NSSocketPort', 'AVAssetReaderTrackOutput', 'SKNode', 'UIMutableUserNotificationAction', 'SCNProgram', 'AVSpeechSynthesisVoice', 'CMAltimeter', 'AVCaptureAudioChannel', 'GKTurnBasedExchangeReply', 'AVVideoCompositionLayerInstruction', 'AVSpeechSynthesizer', 'GKChallengeEventHandler', 'AVCaptureFileOutput', 'UIControl', 'SCNPhysicsField', 'CKReference', 'LAContext', 'CKRecordID', 'ADInterstitialAd', 'AVAudioSessionDataSourceDescription', 
'AVAudioBuffer', 'CIColorKernel', 'GCControllerDirectionPad', 'NSFileManager', 'AVMutableAudioMixInputParameters', 'UIScreenEdgePanGestureRecognizer', 'CAKeyframeAnimation', 'CKQueryNotification', 'PHAdjustmentData', 'EASession', 'AVAssetResourceRenewalRequest', 'UIInputView', 'NSFileWrapper', 'UIResponder', 'NSPointerFunctions', 'UIKeyCommand', 'NSHTTPCookieStorage', 'AVMediaSelectionOption', 'NSRunLoop', 'NSFileAccessIntent', 'CAAnimationGroup', 'MKCircle', 'UIAlertController', 'NSMigrationManager', 'NSDateIntervalFormatter', 'UICollectionViewUpdateItem', 'CKDatabaseOperation', 'PHImageRequestOptions', 'SKReachConstraints', 'CKRecord', 'CAInterAppAudioSwitcherView', 'WKWindowFeatures', 'GKInvite', 'NSMutableData', 'PHAssetCollectionChangeRequest', 'NSMutableParagraphStyle', 'UIDynamicBehavior', 'GLKEffectProperty', 'CKFetchRecordChangesOperation', 'SKShapeNode', 'MPMovieErrorLogEvent', 'MKPolygonView', 'MPContentItem', 'HMAction', 'NSScanner', 'GKAchievementChallenge', 'AVAudioPlayer', 'CKContainer', 'AVVideoComposition', 'NKLibrary', 'NSPersistentStore', 'AVCaptureMovieFileOutput', 'HMRoom', 'GKChallenge', 'UITextRange', 'NSURLProtectionSpace', 'ACAccountStore', 'MPSkipIntervalCommand', 'NSComparisonPredicate', 'HMHome', 'PHVideoRequestOptions', 'NSOutputStream', 'MPSkipIntervalCommandEvent', 'PKAddPassesViewController', 'UITextSelectionRect', 'CTTelephonyNetworkInfo', 'AVTextStyleRule', 'NSFetchedPropertyDescription', 'UIPageViewController', 'CATransformLayer', 'UICollectionViewController', 'AVAudioNode', 'MCNearbyServiceAdvertiser', 'NSObject', 'PHAsset', 'GKLeaderboardViewController', 'CKQueryCursor', 'MPMusicPlayerController', 'MKOverlayPathRenderer', 'CMPedometerData', 'HMService', 'SKFieldNode', 'GKAchievement', 'WKUserContentController', 'AVAssetTrack', 'TWRequest', 'SKLabelNode', 'AVCaptureBracketedStillImageSettings', 'MIDINetworkHost', 'MPMediaPredicate', 'AVFrameRateRange', 'MTLTextureDescriptor', 'MTLVertexBufferLayoutDescriptor', 'MPFeedbackCommandEvent', 'UIUserNotificationAction', 'HKStatisticsQuery', 'SCNParticleSystem', 'NSIndexPath', 'AVVideoCompositionRenderContext', 'CADisplayLink', 'HKObserverQuery', 'UIPopoverPresentationController', 'CKQueryOperation', 'CAEAGLLayer', 'NSMutableString', 'NSMessagePort', 'NSURLQueryItem', 'MTLStructMember', 'AVAudioSessionChannelDescription', 'GLKView', 'UIActivityViewController', 'GKAchievementViewController', 'GKTurnBasedParticipant', 'NSURLProtocol', 'NSUserDefaults', 'NSCalendar', 'SKKeyframeSequence', 'AVMetadataItemFilter', 'CKModifyRecordZonesOperation', 'WKPreferences', 'NSMethodSignature', 'NSRegularExpression', 'EAGLSharegroup', 'AVPlayerItemVideoOutput', 'PHContentEditingInputRequestOptions', 'GKMatch', 'CIColor', 'UIDictationPhrase'} +COCOA_PROTOCOLS = {'SKStoreProductViewControllerDelegate', 'AVVideoCompositionInstruction', 'AVAudioSessionDelegate', 'GKMatchDelegate', 'NSFileManagerDelegate', 'UILayoutSupport', 'NSCopying', 'UIPrintInteractionControllerDelegate', 'QLPreviewControllerDataSource', 'SKProductsRequestDelegate', 'NSTextStorageDelegate', 'MCBrowserViewControllerDelegate', 'MTLComputeCommandEncoder', 'SCNSceneExportDelegate', 'UISearchResultsUpdating', 'MFMailComposeViewControllerDelegate', 'MTLBlitCommandEncoder', 'NSDecimalNumberBehaviors', 'PHContentEditingController', 'NSMutableCopying', 'UIActionSheetDelegate', 'UIViewControllerTransitioningDelegate', 'UIAlertViewDelegate', 'AVAudioPlayerDelegate', 'MKReverseGeocoderDelegate', 'NSCoding', 'UITextInputTokenizer', 
'GKFriendRequestComposeViewControllerDelegate', 'UIActivityItemSource', 'NSCacheDelegate', 'UIAdaptivePresentationControllerDelegate', 'GKAchievementViewControllerDelegate', 'UIViewControllerTransitionCoordinator', 'EKEventEditViewDelegate', 'NSURLConnectionDelegate', 'UITableViewDelegate', 'GKPeerPickerControllerDelegate', 'UIGuidedAccessRestrictionDelegate', 'AVSpeechSynthesizerDelegate', 'AVAudio3DMixing', 'AVPlayerItemLegibleOutputPushDelegate', 'ADInterstitialAdDelegate', 'HMAccessoryBrowserDelegate', 'AVAssetResourceLoaderDelegate', 'UITabBarControllerDelegate', 'CKRecordValue', 'SKPaymentTransactionObserver', 'AVCaptureAudioDataOutputSampleBufferDelegate', 'UIInputViewAudioFeedback', 'GKChallengeListener', 'SKSceneDelegate', 'UIPickerViewDelegate', 'UIWebViewDelegate', 'UIApplicationDelegate', 'GKInviteEventListener', 'MPMediaPlayback', 'MyClassJavaScriptMethods', 'AVAsynchronousKeyValueLoading', 'QLPreviewItem', 'SCNBoundingVolume', 'NSPortDelegate', 'UIContentContainer', 'SCNNodeRendererDelegate', 'SKRequestDelegate', 'SKPhysicsContactDelegate', 'HMAccessoryDelegate', 'UIPageViewControllerDataSource', 'SCNSceneRendererDelegate', 'SCNPhysicsContactDelegate', 'MKMapViewDelegate', 'AVPlayerItemOutputPushDelegate', 'UICollectionViewDelegate', 'UIImagePickerControllerDelegate', 'MTLRenderCommandEncoder', 'PKPaymentAuthorizationViewControllerDelegate', 'UIToolbarDelegate', 'WKUIDelegate', 'SCNActionable', 'NSURLConnectionDataDelegate', 'MKOverlay', 'CBCentralManagerDelegate', 'JSExport', 'NSTextLayoutOrientationProvider', 'UIPickerViewDataSource', 'PKPushRegistryDelegate', 'UIViewControllerTransitionCoordinatorContext', 'NSLayoutManagerDelegate', 'MTLLibrary', 'NSFetchedResultsControllerDelegate', 'ABPeoplePickerNavigationControllerDelegate', 'MTLResource', 'NSDiscardableContent', 'UITextFieldDelegate', 'MTLBuffer', 'MTLSamplerState', 'GKGameCenterControllerDelegate', 'MPMediaPickerControllerDelegate', 'UISplitViewControllerDelegate', 'UIAppearance', 'UIPickerViewAccessibilityDelegate', 'UITraitEnvironment', 'UIScrollViewAccessibilityDelegate', 'ADBannerViewDelegate', 'MPPlayableContentDataSource', 'MTLComputePipelineState', 'NSURLSessionDelegate', 'MTLCommandBuffer', 'NSXMLParserDelegate', 'UIViewControllerRestoration', 'UISearchBarDelegate', 'UIBarPositioning', 'CBPeripheralDelegate', 'UISearchDisplayDelegate', 'CAAction', 'PKAddPassesViewControllerDelegate', 'MCNearbyServiceAdvertiserDelegate', 'MTLDepthStencilState', 'GKTurnBasedMatchmakerViewControllerDelegate', 'MPPlayableContentDelegate', 'AVCaptureVideoDataOutputSampleBufferDelegate', 'UIAppearanceContainer', 'UIStateRestoring', 'UITextDocumentProxy', 'MTLDrawable', 'NSURLSessionTaskDelegate', 'NSFilePresenter', 'AVAudioStereoMixing', 'UIViewControllerContextTransitioning', 'UITextInput', 'CBPeripheralManagerDelegate', 'UITextInputDelegate', 'NSFastEnumeration', 'NSURLAuthenticationChallengeSender', 'SCNProgramDelegate', 'AVVideoCompositing', 'SCNAnimatable', 'NSSecureCoding', 'MCAdvertiserAssistantDelegate', 'GKLocalPlayerListener', 'GLKNamedEffect', 'UIPopoverControllerDelegate', 'AVCaptureMetadataOutputObjectsDelegate', 'NSExtensionRequestHandling', 'UITextSelecting', 'UIPrinterPickerControllerDelegate', 'NCWidgetProviding', 'MTLCommandEncoder', 'NSURLProtocolClient', 'MFMessageComposeViewControllerDelegate', 'UIVideoEditorControllerDelegate', 'WKNavigationDelegate', 'GKSavedGameListener', 'UITableViewDataSource', 'MTLFunction', 'EKCalendarChooserDelegate', 'NSUserActivityDelegate', 'UICollisionBehaviorDelegate', 
'NSStreamDelegate', 'MCNearbyServiceBrowserDelegate', 'HMHomeDelegate', 'UINavigationControllerDelegate', 'MCSessionDelegate', 'UIDocumentPickerDelegate', 'UIViewControllerInteractiveTransitioning', 'GKTurnBasedEventListener', 'SCNSceneRenderer', 'MTLTexture', 'GLKViewDelegate', 'EAAccessoryDelegate', 'WKScriptMessageHandler', 'PHPhotoLibraryChangeObserver', 'NSKeyedUnarchiverDelegate', 'AVPlayerItemMetadataOutputPushDelegate', 'NSMachPortDelegate', 'SCNShadable', 'UIPopoverBackgroundViewMethods', 'UIDocumentMenuDelegate', 'UIBarPositioningDelegate', 'ABPersonViewControllerDelegate', 'NSNetServiceBrowserDelegate', 'EKEventViewDelegate', 'UIScrollViewDelegate', 'NSURLConnectionDownloadDelegate', 'UIGestureRecognizerDelegate', 'UINavigationBarDelegate', 'AVAudioMixing', 'NSFetchedResultsSectionInfo', 'UIDocumentInteractionControllerDelegate', 'MTLParallelRenderCommandEncoder', 'QLPreviewControllerDelegate', 'UIAccessibilityReadingContent', 'ABUnknownPersonViewControllerDelegate', 'GLKViewControllerDelegate', 'UICollectionViewDelegateFlowLayout', 'UIPopoverPresentationControllerDelegate', 'UIDynamicAnimatorDelegate', 'NSTextAttachmentContainer', 'MKAnnotation', 'UIAccessibilityIdentification', 'UICoordinateSpace', 'ABNewPersonViewControllerDelegate', 'MTLDevice', 'CAMediaTiming', 'AVCaptureFileOutputRecordingDelegate', 'HMHomeManagerDelegate', 'UITextViewDelegate', 'UITabBarDelegate', 'GKLeaderboardViewControllerDelegate', 'UISearchControllerDelegate', 'EAWiFiUnconfiguredAccessoryBrowserDelegate', 'UITextInputTraits', 'MTLRenderPipelineState', 'GKVoiceChatClient', 'UIKeyInput', 'UICollectionViewDataSource', 'SCNTechniqueSupport', 'NSLocking', 'AVCaptureFileOutputDelegate', 'GKChallengeEventHandlerDelegate', 'UIObjectRestoration', 'CIFilterConstructor', 'AVPlayerItemOutputPullDelegate', 'EAGLDrawable', 'AVVideoCompositionValidationHandling', 'UIViewControllerAnimatedTransitioning', 'NSURLSessionDownloadDelegate', 'UIAccelerometerDelegate', 'UIPageViewControllerDelegate', 'MTLCommandQueue', 'UIDataSourceModelAssociation', 'AVAudioRecorderDelegate', 'GKSessionDelegate', 'NSKeyedArchiverDelegate', 'CAMetalDrawable', 'UIDynamicItem', 'CLLocationManagerDelegate', 'NSMetadataQueryDelegate', 'NSNetServiceDelegate', 'GKMatchmakerViewControllerDelegate', 'NSURLSessionDataDelegate'} +COCOA_PRIMITIVES = {'ROTAHeader', '__CFBundle', 'MortSubtable', 'AudioFilePacketTableInfo', 'CGPDFOperatorTable', 'KerxStateEntry', 'ExtendedTempoEvent', 'CTParagraphStyleSetting', 'OpaqueMIDIPort', '_GLKMatrix3', '_GLKMatrix2', '_GLKMatrix4', 'ExtendedControlEvent', 'CAFAudioDescription', 'OpaqueCMBlockBuffer', 'CGTextDrawingMode', 'EKErrorCode', 'gss_buffer_desc_struct', 'AudioUnitParameterInfo', '__SCPreferences', '__CTFrame', '__CTLine', 'AudioFile_SMPTE_Time', 'gss_krb5_lucid_context_v1', 'OpaqueJSValue', 'TrakTableEntry', 'AudioFramePacketTranslation', 'CGImageSource', 'OpaqueJSPropertyNameAccumulator', 'JustPCGlyphRepeatAddAction', '__CFBinaryHeap', 'OpaqueMIDIThruConnection', 'opaqueCMBufferQueue', 'OpaqueMusicSequence', 'MortRearrangementSubtable', 'MixerDistanceParams', 'MorxSubtable', 'MIDIObjectPropertyChangeNotification', 'SFNTLookupSegment', 'CGImageMetadataErrors', 'CGPath', 'OpaqueMIDIEndpoint', 'AudioComponentPlugInInterface', 'gss_ctx_id_t_desc_struct', 'sfntFontFeatureSetting', 'OpaqueJSContextGroup', '__SCNetworkConnection', 'AudioUnitParameterValueTranslation', 'CGImageMetadataType', 'CGPattern', 'AudioFileTypeAndFormatID', 'CGContext', 'AUNodeInteraction', 'SFNTLookupTable', 
'JustPCDecompositionAction', 'KerxControlPointHeader', 'AudioStreamPacketDescription', 'KernSubtableHeader', '__SecCertificate', 'AUMIDIOutputCallbackStruct', 'MIDIMetaEvent', 'AudioQueueChannelAssignment', 'AnchorPoint', 'JustTable', '__CFNetService', 'CF_BRIDGED_TYPE', 'gss_krb5_lucid_key', 'CGPDFDictionary', 'KerxSubtableHeader', 'CAF_UUID_ChunkHeader', 'gss_krb5_cfx_keydata', 'OpaqueJSClass', 'CGGradient', 'OpaqueMIDISetup', 'JustPostcompTable', '__CTParagraphStyle', 'AudioUnitParameterHistoryInfo', 'OpaqueJSContext', 'CGShading', 'MIDIThruConnectionParams', 'BslnFormat0Part', 'SFNTLookupSingle', '__CFHost', '__SecRandom', '__CTFontDescriptor', '_NSRange', 'sfntDirectory', 'AudioQueueLevelMeterState', 'CAFPositionPeak', 'PropLookupSegment', '__CVOpenGLESTextureCache', 'sfntInstance', '_GLKQuaternion', 'AnkrTable', '__SCNetworkProtocol', 'CAFFileHeader', 'KerxOrderedListHeader', 'CGBlendMode', 'STXEntryOne', 'CAFRegion', 'SFNTLookupTrimmedArrayHeader', 'SCNMatrix4', 'KerxControlPointEntry', 'OpaqueMusicTrack', '_GLKVector4', 'gss_OID_set_desc_struct', 'OpaqueMusicPlayer', '_CFHTTPAuthentication', 'CGAffineTransform', 'CAFMarkerChunk', 'AUHostIdentifier', 'ROTAGlyphEntry', 'BslnTable', 'gss_krb5_lucid_context_version', '_GLKMatrixStack', 'CGImage', 'KernStateEntry', 'SFNTLookupSingleHeader', 'MortLigatureSubtable', 'CAFUMIDChunk', 'SMPTETime', 'CAFDataChunk', 'CGPDFStream', 'AudioFileRegionList', 'STEntryTwo', 'SFNTLookupBinarySearchHeader', 'OpbdTable', '__CTGlyphInfo', 'BslnFormat2Part', 'KerxIndexArrayHeader', 'TrakTable', 'KerxKerningPair', '__CFBitVector', 'KernVersion0SubtableHeader', 'OpaqueAudioComponentInstance', 'AudioChannelLayout', '__CFUUID', 'MIDISysexSendRequest', '__CFNumberFormatter', 'CGImageSourceStatus', 'AudioFileMarkerList', 'AUSamplerBankPresetData', 'CGDataProvider', 'AudioFormatInfo', '__SecIdentity', 'sfntCMapExtendedSubHeader', 'MIDIChannelMessage', 'KernOffsetTable', 'CGColorSpaceModel', 'MFMailComposeErrorCode', 'CGFunction', '__SecTrust', 'AVAudio3DAngularOrientation', 'CGFontPostScriptFormat', 'KernStateHeader', 'AudioUnitCocoaViewInfo', 'CGDataConsumer', 'OpaqueMIDIDevice', 'KernVersion0Header', 'AnchorPointTable', 'CGImageDestination', 'CAFInstrumentChunk', 'AudioUnitMeterClipping', 'MorxChain', '__CTFontCollection', 'STEntryOne', 'STXEntryTwo', 'ExtendedNoteOnEvent', 'CGColorRenderingIntent', 'KerxSimpleArrayHeader', 'MorxTable', '_GLKVector3', '_GLKVector2', 'MortTable', 'CGPDFBox', 'AudioUnitParameterValueFromString', '__CFSocket', 'ALCdevice_struct', 'MIDINoteMessage', 'sfntFeatureHeader', 'CGRect', '__SCNetworkInterface', '__CFTree', 'MusicEventUserData', 'TrakTableData', 'GCQuaternion', 'MortContextualSubtable', '__CTRun', 'AudioUnitFrequencyResponseBin', 'MortChain', 'MorxInsertionSubtable', 'CGImageMetadata', 'gss_auth_identity', 'AudioUnitMIDIControlMapping', 'CAFChunkHeader', 'CGImagePropertyOrientation', 'CGPDFScanner', 'OpaqueMusicEventIterator', 'sfntDescriptorHeader', 'AudioUnitNodeConnection', 'OpaqueMIDIDeviceList', 'ExtendedAudioFormatInfo', 'BslnFormat1Part', 'sfntFontDescriptor', 'KernSimpleArrayHeader', '__CFRunLoopObserver', 'CGPatternTiling', 'MIDINotification', 'MorxLigatureSubtable', 'MessageComposeResult', 'MIDIThruConnectionEndpoint', 'MusicDeviceStdNoteParams', 'opaqueCMSimpleQueue', 'ALCcontext_struct', 'OpaqueAudioQueue', 'PropLookupSingle', 'CGInterpolationQuality', 'CGColor', 'AudioOutputUnitStartAtTimeParams', 'gss_name_t_desc_struct', 'CGFunctionCallbacks', 'CAFPacketTableHeader', 'AudioChannelDescription', 
'sfntFeatureName', 'MorxContextualSubtable', 'CVSMPTETime', 'AudioValueRange', 'CGTextEncoding', 'AudioStreamBasicDescription', 'AUNodeRenderCallback', 'AudioPanningInfo', 'KerxOrderedListEntry', '__CFAllocator', 'OpaqueJSPropertyNameArray', '__SCDynamicStore', 'OpaqueMIDIEntity', '__CTRubyAnnotation', 'SCNVector4', 'CFHostClientContext', 'CFNetServiceClientContext', 'AudioUnitPresetMAS_SettingData', 'opaqueCMBufferQueueTriggerToken', 'AudioUnitProperty', 'CAFRegionChunk', 'CGPDFString', '__GLsync', '__CFStringTokenizer', 'JustWidthDeltaEntry', 'sfntVariationAxis', '__CFNetDiagnostic', 'CAFOverviewSample', 'sfntCMapEncoding', 'CGVector', '__SCNetworkService', 'opaqueCMSampleBuffer', 'AUHostVersionIdentifier', 'AudioBalanceFade', 'sfntFontRunFeature', 'KerxCoordinateAction', 'sfntCMapSubHeader', 'CVPlanarPixelBufferInfo', 'AUNumVersion', 'AUSamplerInstrumentData', 'AUPreset', '__CTRunDelegate', 'OpaqueAudioQueueProcessingTap', 'KerxTableHeader', '_NSZone', 'OpaqueExtAudioFile', '__CFRunLoopSource', '__CVMetalTextureCache', 'KerxAnchorPointAction', 'OpaqueJSString', 'AudioQueueParameterEvent', '__CFHTTPMessage', 'OpaqueCMClock', 'ScheduledAudioFileRegion', 'STEntryZero', 'AVAudio3DPoint', 'gss_channel_bindings_struct', 'sfntVariationHeader', 'AUChannelInfo', 'UIOffset', 'GLKEffectPropertyPrv', 'KerxStateHeader', 'CGLineJoin', 'CGPDFDocument', '__CFBag', 'KernOrderedListHeader', '__SCNetworkSet', '__SecKey', 'MIDIObjectAddRemoveNotification', 'AudioUnitParameter', 'JustPCActionSubrecord', 'AudioComponentDescription', 'AudioUnitParameterValueName', 'AudioUnitParameterEvent', 'KerxControlPointAction', 'AudioTimeStamp', 'KernKerningPair', 'gss_buffer_set_desc_struct', 'MortFeatureEntry', 'FontVariation', 'CAFStringID', 'LcarCaretClassEntry', 'AudioUnitParameterStringFromValue', 'ACErrorCode', 'ALMXGlyphEntry', 'LtagTable', '__CTTypesetter', 'AuthorizationOpaqueRef', 'UIEdgeInsets', 'CGPathElement', 'CAFMarker', 'KernTableHeader', 'NoteParamsControlValue', 'SSLContext', 'gss_cred_id_t_desc_struct', 'AudioUnitParameterNameInfo', 'CGDataConsumerCallbacks', 'ALMXHeader', 'CGLineCap', 'MIDIControlTransform', 'CGPDFArray', '__SecPolicy', 'AudioConverterPrimeInfo', '__CTTextTab', '__CFNetServiceMonitor', 'AUInputSamplesInOutputCallbackStruct', '__CTFramesetter', 'CGPDFDataFormat', 'STHeader', 'CVPlanarPixelBufferInfo_YCbCrPlanar', 'MIDIValueMap', 'JustDirectionTable', '__SCBondStatus', 'SFNTLookupSegmentHeader', 'OpaqueCMMemoryPool', 'CGPathDrawingMode', 'CGFont', '__SCNetworkReachability', 'AudioClassDescription', 'CGPoint', 'AVAudio3DVectorOrientation', 'CAFStrings', '__CFNetServiceBrowser', 'opaqueMTAudioProcessingTap', 'sfntNameRecord', 'CGPDFPage', 'CGLayer', 'ComponentInstanceRecord', 'CAFInfoStrings', 'HostCallbackInfo', 'MusicDeviceNoteParams', 'OpaqueVTCompressionSession', 'KernIndexArrayHeader', 'CVPlanarPixelBufferInfo_YCbCrBiPlanar', 'MusicTrackLoopInfo', 'opaqueCMFormatDescription', 'STClassTable', 'sfntDirectoryEntry', 'OpaqueCMTimebase', 'CGDataProviderDirectCallbacks', 'MIDIPacketList', 'CAFOverviewChunk', 'MIDIPacket', 'ScheduledAudioSlice', 'CGDataProviderSequentialCallbacks', 'AudioBuffer', 'MorxRearrangementSubtable', 'CGPatternCallbacks', 'AUDistanceAttenuationData', 'MIDIIOErrorNotification', 'CGPDFContentStream', 'IUnknownVTbl', 'MIDITransform', 'MortInsertionSubtable', 'CABarBeatTime', 'AudioBufferList', '__CVBuffer', 'AURenderCallbackStruct', 'STXEntryZero', 'JustPCDuctilityAction', 'OpaqueAudioQueueTimeline', 'VTDecompressionOutputCallbackRecord', 'OpaqueMIDIClient', 
'__CFPlugInInstance', 'AudioQueueBuffer', '__CFFileDescriptor', 'AudioUnitConnection', '_GKTurnBasedExchangeStatus', 'LcarCaretTable', 'CVPlanarComponentInfo', 'JustWidthDeltaGroup', 'OpaqueAudioComponent', 'ParameterEvent', '__CVPixelBufferPool', '__CTFont', 'CGColorSpace', 'CGSize', 'AUDependentParameter', 'MIDIDriverInterface', 'gss_krb5_rfc1964_keydata', '__CFDateFormatter', 'LtagStringRange', 'OpaqueVTDecompressionSession', 'gss_iov_buffer_desc_struct', 'AUPresetEvent', 'PropTable', 'KernOrderedListEntry', 'CF_BRIDGED_MUTABLE_TYPE', 'gss_OID_desc_struct', 'AudioUnitPresetMAS_Settings', 'AudioFileMarker', 'JustPCConditionalAddAction', 'BslnFormat3Part', '__CFNotificationCenter', 'MortSwashSubtable', 'AUParameterMIDIMapping', 'SCNVector3', 'OpaqueAudioConverter', 'MIDIRawData', 'sfntNameHeader', '__CFRunLoop', 'MFMailComposeResult', 'CATransform3D', 'OpbdSideValues', 'CAF_SMPTE_Time', '__SecAccessControl', 'JustPCAction', 'OpaqueVTFrameSilo', 'OpaqueVTMultiPassStorage', 'CGPathElementType', 'AudioFormatListItem', 'AudioUnitExternalBuffer', 'AudioFileRegion', 'AudioValueTranslation', 'CGImageMetadataTag', 'CAFPeakChunk', 'AudioBytePacketTranslation', 'sfntCMapHeader', '__CFURLEnumerator', 'STXHeader', 'CGPDFObjectType', 'SFNTLookupArrayHeader'} if __name__ == '__main__': # pragma: no cover import os diff --git a/pygments/lexers/_csound_builtins.py b/pygments/lexers/_csound_builtins.py index 16a4839..72e1fe3 100644 --- a/pygments/lexers/_csound_builtins.py +++ b/pygments/lexers/_csound_builtins.py @@ -7,48 +7,52 @@ :license: BSD, see LICENSE for details. """ -# Opcodes in Csound 6.12.0 at commit 6ca322bd31f1ca907c008616b40a5f237ff449db using -# python -c " -# import re, subprocess -# output = subprocess.Popen(['csound', '--list-opcodes0'], stderr=subprocess.PIPE).communicate()[1] -# opcodes = output[re.search(r'^$', output, re.M).end():re.search(r'^\d+ opcodes$', output, re.M).start()].split() -# output = subprocess.Popen(['csound', '--list-opcodes2'], stderr=subprocess.PIPE).communicate()[1] -# all_opcodes = output[re.search(r'^$', output, re.M).end():re.search(r'^\d+ opcodes$', output, re.M).start()].split() +# Opcodes in Csound 6.13.0 using: +# python3 -c " +# import re +# from subprocess import Popen, PIPE +# output = Popen(['csound', '--list-opcodes0'], stderr=PIPE, text=True).communicate()[1] +# opcodes = output[re.search(r'^\$', output, re.M).end() : re.search(r'^\d+ opcodes\$', output, re.M).start()].split() +# output = Popen(['csound', '--list-opcodes2'], stderr=PIPE, text=True).communicate()[1] +# all_opcodes = output[re.search(r'^\$', output, re.M).end() : re.search(r'^\d+ opcodes\$', output, re.M).start()].split() # deprecated_opcodes = [opcode for opcode in all_opcodes if opcode not in opcodes] -# print '''OPCODES = set(\''' -# {} +# # Remove opcodes that csound.py treats as keywords. 
+# keyword_opcodes = [ +# 'cggoto', # https://csound.com/docs/manual/cggoto.html +# 'cigoto', # https://csound.com/docs/manual/cigoto.html +# 'cingoto', # (undocumented) +# 'ckgoto', # https://csound.com/docs/manual/ckgoto.html +# 'cngoto', # https://csound.com/docs/manual/cngoto.html +# 'cnkgoto', # (undocumented) +# 'endin', # https://csound.com/docs/manual/endin.html +# 'endop', # https://csound.com/docs/manual/endop.html +# 'goto', # https://csound.com/docs/manual/goto.html +# 'igoto', # https://csound.com/docs/manual/igoto.html +# 'instr', # https://csound.com/docs/manual/instr.html +# 'kgoto', # https://csound.com/docs/manual/kgoto.html +# 'loop_ge', # https://csound.com/docs/manual/loop_ge.html +# 'loop_gt', # https://csound.com/docs/manual/loop_gt.html +# 'loop_le', # https://csound.com/docs/manual/loop_le.html +# 'loop_lt', # https://csound.com/docs/manual/loop_lt.html +# 'opcode', # https://csound.com/docs/manual/opcode.html +# 'reinit', # https://csound.com/docs/manual/reinit.html +# 'return', # https://csound.com/docs/manual/return.html +# 'rireturn', # https://csound.com/docs/manual/rireturn.html +# 'rigoto', # https://csound.com/docs/manual/rigoto.html +# 'tigoto', # https://csound.com/docs/manual/tigoto.html +# 'timout' # https://csound.com/docs/manual/timout.html +# ] +# opcodes = [opcode for opcode in opcodes if opcode not in keyword_opcodes] +# newline = '\n' +# print(f'''OPCODES = set(\''' +# {newline.join(opcodes)} # \'''.split()) # # DEPRECATED_OPCODES = set(\''' -# {} +# {newline.join(deprecated_opcodes)} # \'''.split()) -# '''.format('\n'.join(opcodes), '\n'.join(deprecated_opcodes)) +# ''') # " -# except for -# cggoto csound.com/docs/manual/cggoto.html -# cigoto csound.com/docs/manual/cigoto.html -# cingoto (undocumented) -# ckgoto csound.com/docs/manual/ckgoto.html -# cngoto csound.com/docs/manual/cngoto.html -# cnkgoto (undocumented) -# endin csound.com/docs/manual/endin.html -# endop csound.com/docs/manual/endop.html -# goto csound.com/docs/manual/goto.html -# igoto csound.com/docs/manual/igoto.html -# instr csound.com/docs/manual/instr.html -# kgoto csound.com/docs/manual/kgoto.html -# loop_ge csound.com/docs/manual/loop_ge.html -# loop_gt csound.com/docs/manual/loop_gt.html -# loop_le csound.com/docs/manual/loop_le.html -# loop_lt csound.com/docs/manual/loop_lt.html -# opcode csound.com/docs/manual/opcode.html -# reinit csound.com/docs/manual/reinit.html -# return csound.com/docs/manual/return.html -# rireturn csound.com/docs/manual/rireturn.html -# rigoto csound.com/docs/manual/rigoto.html -# tigoto csound.com/docs/manual/tigoto.html -# timout csound.com/docs/manual/timout.html -# which are treated as keywords in csound.py. 
OPCODES = set(''' ATSadd @@ -169,8 +173,8 @@ STKBowed STKBrass STKClarinet STKDrummer -STKFlute STKFMVoices +STKFlute STKHevyMetl STKMandolin STKModalBar @@ -201,6 +205,7 @@ alwayson ampdb ampdbfs ampmidi +ampmidicurve ampmidid areson aresonk @@ -249,7 +254,6 @@ centroid ceps cepsinv chanctrl -changed changed2 chani chano @@ -418,6 +422,17 @@ flashtxt flooper flooper2 floor +fluidAllOut +fluidCCi +fluidCCk +fluidControl +fluidEngine +fluidInfo +fluidLoad +fluidNote +fluidOut +fluidProgramSelect +fluidSetInterpMethod fmanal fmax fmb3 @@ -492,6 +507,7 @@ grain grain2 grain3 granule +gtf guiro harmon harmon2 @@ -599,6 +615,10 @@ la_i_multiply_mc la_i_multiply_mr la_i_multiply_vc la_i_multiply_vr +la_i_norm1_mc +la_i_norm1_mr +la_i_norm1_vc +la_i_norm1_vr la_i_norm_euclid_mc la_i_norm_euclid_mr la_i_norm_euclid_vc @@ -609,10 +629,6 @@ la_i_norm_inf_vc la_i_norm_inf_vr la_i_norm_max_mc la_i_norm_max_mr -la_i_norm1_mc -la_i_norm1_mr -la_i_norm1_vc -la_i_norm1_vr la_i_print_mc la_i_print_mr la_i_print_vc @@ -697,6 +713,10 @@ la_k_multiply_mc la_k_multiply_mr la_k_multiply_vc la_k_multiply_vr +la_k_norm1_mc +la_k_norm1_mr +la_k_norm1_vc +la_k_norm1_vr la_k_norm_euclid_mc la_k_norm_euclid_mr la_k_norm_euclid_vc @@ -707,10 +727,6 @@ la_k_norm_inf_vc la_k_norm_inf_vr la_k_norm_max_mc la_k_norm_max_mr -la_k_norm1_mc -la_k_norm1_mr -la_k_norm1_vc -la_k_norm1_vr la_k_qr_eigen_mc la_k_qr_eigen_mr la_k_qr_factor_mc @@ -900,6 +916,8 @@ nrpn nsamp nstance nstrnum +nstrstr +ntof ntom ntrpol nxtpow2 @@ -1030,7 +1048,6 @@ pset ptable ptable3 ptablei -ptableiw ptablew ptrack puts @@ -1337,6 +1354,7 @@ strfromurl strget strindex strindexk +string2array strlen strlenk strlower @@ -1380,7 +1398,6 @@ tableicopy tableigpw tableikt tableimix -tableiw tablekt tablemix tableng @@ -1589,6 +1606,7 @@ DEPRECATED_OPCODES = set(''' array bformdec bformenc +changed copy2ftab copy2ttab hrtfer @@ -1598,6 +1616,7 @@ maxtab mintab pop pop_f +ptableiw push push_f scalet @@ -1616,6 +1635,7 @@ spectrum stack sumtab tabgen +tableiw tabmap tabmap_i tabslice diff --git a/pygments/lexers/_lua_builtins.py b/pygments/lexers/_lua_builtins.py index b2b46a6..ca3acb1 100644 --- a/pygments/lexers/_lua_builtins.py +++ b/pygments/lexers/_lua_builtins.py @@ -288,7 +288,7 @@ if __name__ == '__main__': # pragma: no cover print('>> %s' % full_function_name) m = get_function_module(full_function_name) modules.setdefault(m, []).append(full_function_name) - modules = dict((k, tuple(v)) for k, v in modules.iteritems()) + modules = {k: tuple(v) for k, v in modules.iteritems()} regenerate(__file__, modules) diff --git a/pygments/lexers/_mapping.py b/pygments/lexers/_mapping.py index ce1b6df..acb71ad 100644 --- a/pygments/lexers/_mapping.py +++ b/pygments/lexers/_mapping.py @@ -65,7 +65,6 @@ LEXERS = { 'BooLexer': ('pygments.lexers.dotnet', 'Boo', ('boo',), ('*.boo',), ('text/x-boo',)), 'BoogieLexer': ('pygments.lexers.verification', 'Boogie', ('boogie',), ('*.bpl',), ()), 'BrainfuckLexer': ('pygments.lexers.esoteric', 'Brainfuck', ('brainfuck', 'bf'), ('*.bf', '*.b'), ('application/x-brainfuck',)), - 'BroLexer': ('pygments.lexers.dsls', 'Bro', ('bro',), ('*.bro',), ()), 'BugsLexer': ('pygments.lexers.modeling', 'BUGS', ('bugs', 'winbugs', 'openbugs'), ('*.bug',), ()), 'CAmkESLexer': ('pygments.lexers.esoteric', 'CAmkES', ('camkes', 'idl4'), ('*.camkes', '*.idl4'), ()), 'CLexer': ('pygments.lexers.c_cpp', 'C', ('c',), ('*.c', '*.h', '*.idc'), ('text/x-chdr', 'text/x-csrc')), @@ -146,6 +145,7 @@ LEXERS = { 'ElixirLexer': ('pygments.lexers.erlang', 
'Elixir', ('elixir', 'ex', 'exs'), ('*.ex', '*.exs'), ('text/x-elixir',)), 'ElmLexer': ('pygments.lexers.elm', 'Elm', ('elm',), ('*.elm',), ('text/x-elm',)), 'EmacsLispLexer': ('pygments.lexers.lisp', 'EmacsLisp', ('emacs', 'elisp', 'emacs-lisp'), ('*.el',), ('text/x-elisp', 'application/x-elisp')), + 'EmailLexer': ('pygments.lexers.email', 'E-mail', ('email', 'eml'), ('*.eml',), ('message/rfc822',)), 'ErbLexer': ('pygments.lexers.templates', 'ERB', ('erb',), (), ('application/x-ruby-templating',)), 'ErlangLexer': ('pygments.lexers.erlang', 'Erlang', ('erlang',), ('*.erl', '*.hrl', '*.es', '*.escript'), ('text/x-erlang',)), 'ErlangShellLexer': ('pygments.lexers.erlang', 'Erlang erl session', ('erl',), ('*.erl-sh',), ('text/x-erl-shellsession',)), @@ -255,6 +255,7 @@ LEXERS = { 'LogosLexer': ('pygments.lexers.objective', 'Logos', ('logos',), ('*.x', '*.xi', '*.xm', '*.xmi'), ('text/x-logos',)), 'LogtalkLexer': ('pygments.lexers.prolog', 'Logtalk', ('logtalk',), ('*.lgt', '*.logtalk'), ('text/x-logtalk',)), 'LuaLexer': ('pygments.lexers.scripting', 'Lua', ('lua',), ('*.lua', '*.wlua'), ('text/x-lua', 'application/x-lua')), + 'MIMELexer': ('pygments.lexers.mime', 'MIME', ('mime',), (), ('multipart/mixed', 'multipart/related', 'multipart/alternative')), 'MOOCodeLexer': ('pygments.lexers.scripting', 'MOOCode', ('moocode', 'moo'), ('*.moo',), ('text/x-moocode',)), 'MSDOSSessionLexer': ('pygments.lexers.shell', 'MSDOS Session', ('doscon',), (), ()), 'MakefileLexer': ('pygments.lexers.make', 'Makefile', ('make', 'makefile', 'mf', 'bsdmake'), ('*.mak', '*.mk', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile'), ('text/x-makefile',)), @@ -304,6 +305,7 @@ LEXERS = { 'NimrodLexer': ('pygments.lexers.nimrod', 'Nimrod', ('nim', 'nimrod'), ('*.nim', '*.nimrod'), ('text/x-nim',)), 'NitLexer': ('pygments.lexers.nit', 'Nit', ('nit',), ('*.nit',), ()), 'NixLexer': ('pygments.lexers.nix', 'Nix', ('nixos', 'nix'), ('*.nix',), ('text/x-nix',)), + 'NotmuchLexer': ('pygments.lexers.textfmts', 'Notmuch', ('notmuch',), (), ()), 'NuSMVLexer': ('pygments.lexers.smv', 'NuSMV', ('nusmv',), ('*.smv',), ()), 'NumPyLexer': ('pygments.lexers.python', 'NumPy', ('numpy',), (), ()), 'ObjdumpLexer': ('pygments.lexers.asm', 'objdump', ('objdump',), ('*.objdump',), ('text/x-objdump',)), @@ -341,11 +343,11 @@ LEXERS = { 'PugLexer': ('pygments.lexers.html', 'Pug', ('pug', 'jade'), ('*.pug', '*.jade'), ('text/x-pug', 'text/x-jade')), 'PuppetLexer': ('pygments.lexers.dsls', 'Puppet', ('puppet',), ('*.pp',), ()), 'PyPyLogLexer': ('pygments.lexers.console', 'PyPy Log', ('pypylog', 'pypy'), ('*.pypylog',), ('application/x-pypylog',)), - 'Python3Lexer': ('pygments.lexers.python', 'Python 3', ('python3', 'py3'), (), ('text/x-python3', 'application/x-python3')), - 'Python3TracebackLexer': ('pygments.lexers.python', 'Python 3.0 Traceback', ('py3tb',), ('*.py3tb',), ('text/x-python3-traceback',)), + 'Python2Lexer': ('pygments.lexers.python', 'Python 2.x', ('python2', 'py2'), (), ('text/x-python2', 'application/x-python2')), + 'Python2TracebackLexer': ('pygments.lexers.python', 'Python 2.x Traceback', ('py2tb',), ('*.py2tb',), ('text/x-python2-traceback',)), 'PythonConsoleLexer': ('pygments.lexers.python', 'Python console session', ('pycon',), (), ('text/x-python-doctest',)), - 'PythonLexer': ('pygments.lexers.python', 'Python', ('python', 'py', 'sage'), ('*.py', '*.pyw', '*.sc', 'SConstruct', 'SConscript', '*.tac', '*.sage'), ('text/x-python', 'application/x-python')), - 'PythonTracebackLexer': ('pygments.lexers.python', 'Python 
Traceback', ('pytb',), ('*.pytb',), ('text/x-python-traceback',)), + 'PythonLexer': ('pygments.lexers.python', 'Python', ('python', 'py', 'sage', 'python3', 'py3'), ('*.py', '*.pyw', '*.jy', '*.sage', '*.sc', 'SConstruct', 'SConscript', '*.bzl', 'BUCK', 'BUILD', 'BUILD.bazel', 'WORKSPACE', '*.tac'), ('text/x-python', 'application/x-python', 'text/x-python3', 'application/x-python3')), + 'PythonTracebackLexer': ('pygments.lexers.python', 'Python Traceback', ('pytb', 'py3tb'), ('*.pytb', '*.py3tb'), ('text/x-python-traceback', 'text/x-python3-traceback')), 'QBasicLexer': ('pygments.lexers.basic', 'QBasic', ('qbasic', 'basic'), ('*.BAS', '*.bas'), ('text/basic',)), 'QVToLexer': ('pygments.lexers.qvt', 'QVTO', ('qvto', 'qvt'), ('*.qvto',), ()), 'QmlLexer': ('pygments.lexers.webmisc', 'QML', ('qml', 'qbs'), ('*.qml', '*.qbs'), ('application/x-qml', 'application/x-qt.qbs+qml')), @@ -367,12 +369,12 @@ LEXERS = { 'RedLexer': ('pygments.lexers.rebol', 'Red', ('red', 'red/system'), ('*.red', '*.reds'), ('text/x-red', 'text/x-red-system')), 'RedcodeLexer': ('pygments.lexers.esoteric', 'Redcode', ('redcode',), ('*.cw',), ()), 'RegeditLexer': ('pygments.lexers.configs', 'reg', ('registry',), ('*.reg',), ('text/x-windows-registry',)), - 'ResourceLexer': ('pygments.lexers.resource', 'ResourceBundle', ('resource', 'resourcebundle'), ('*.txt',), ()), + 'ResourceLexer': ('pygments.lexers.resource', 'ResourceBundle', ('resource', 'resourcebundle'), (), ()), 'RexxLexer': ('pygments.lexers.scripting', 'Rexx', ('rexx', 'arexx'), ('*.rexx', '*.rex', '*.rx', '*.arexx'), ('text/x-rexx',)), 'RhtmlLexer': ('pygments.lexers.templates', 'RHTML', ('rhtml', 'html+erb', 'html+ruby'), ('*.rhtml',), ('text/html+ruby',)), 'RoboconfGraphLexer': ('pygments.lexers.roboconf', 'Roboconf Graph', ('roboconf-graph',), ('*.graph',), ()), 'RoboconfInstancesLexer': ('pygments.lexers.roboconf', 'Roboconf Instances', ('roboconf-instances',), ('*.instances',), ()), - 'RobotFrameworkLexer': ('pygments.lexers.robotframework', 'RobotFramework', ('robotframework',), ('*.txt', '*.robot'), ('text/x-robotframework',)), + 'RobotFrameworkLexer': ('pygments.lexers.robotframework', 'RobotFramework', ('robotframework',), ('*.robot',), ('text/x-robotframework',)), 'RqlLexer': ('pygments.lexers.sql', 'RQL', ('rql',), ('*.rql',), ('text/x-rql',)), 'RslLexer': ('pygments.lexers.dsls', 'RSL', ('rsl',), ('*.rsl',), ('text/rsl',)), 'RstLexer': ('pygments.lexers.markup', 'reStructuredText', ('rst', 'rest', 'restructuredtext'), ('*.rst', '*.rest'), ('text/x-rst', 'text/prs.fallenstein.rst')), @@ -387,9 +389,11 @@ LEXERS = { 'SassLexer': ('pygments.lexers.css', 'Sass', ('sass',), ('*.sass',), ('text/x-sass',)), 'ScalaLexer': ('pygments.lexers.jvm', 'Scala', ('scala',), ('*.scala',), ('text/x-scala',)), 'ScamlLexer': ('pygments.lexers.html', 'Scaml', ('scaml',), ('*.scaml',), ('text/x-scaml',)), + 'ScdocLexer': ('pygments.lexers.scdoc', 'scdoc', ('scdoc', 'scd'), ('*.scd', '*.scdoc'), ()), 'SchemeLexer': ('pygments.lexers.lisp', 'Scheme', ('scheme', 'scm'), ('*.scm', '*.ss'), ('text/x-scheme', 'application/x-scheme')), 'ScilabLexer': ('pygments.lexers.matlab', 'Scilab', ('scilab',), ('*.sci', '*.sce', '*.tst'), ('text/scilab',)), 'ScssLexer': ('pygments.lexers.css', 'SCSS', ('scss',), ('*.scss',), ('text/x-scss',)), + 'ShExCLexer': ('pygments.lexers.rdf', 'ShExC', ('shexc', 'shex'), ('*.shex',), ('text/shex',)), 'ShenLexer': ('pygments.lexers.lisp', 'Shen', ('shen',), ('*.shen',), ('text/x-shen', 'application/x-shen')), 'SilverLexer': 
('pygments.lexers.verification', 'Silver', ('silver',), ('*.sil', '*.vpr'), ()), 'SlashLexer': ('pygments.lexers.slash', 'Slash', ('slash',), ('*.sl',), ()), @@ -401,6 +405,7 @@ LEXERS = { 'SmartyLexer': ('pygments.lexers.templates', 'Smarty', ('smarty',), ('*.tpl',), ('application/x-smarty',)), 'SnobolLexer': ('pygments.lexers.snobol', 'Snobol', ('snobol',), ('*.snobol',), ('text/x-snobol',)), 'SnowballLexer': ('pygments.lexers.dsls', 'Snowball', ('snowball',), ('*.sbl',), ()), + 'SolidityLexer': ('pygments.lexers.solidity', 'Solidity', ('solidity',), ('*.sol',), ()), 'SourcePawnLexer': ('pygments.lexers.pawn', 'SourcePawn', ('sp',), ('*.sp',), ('text/x-sourcepawn',)), 'SourcesListLexer': ('pygments.lexers.installers', 'Debian Sourcelist', ('sourceslist', 'sources.list', 'debsources'), ('sources.list',), ()), 'SparqlLexer': ('pygments.lexers.rdf', 'SPARQL', ('sparql',), ('*.rq', '*.sparql'), ('application/sparql-query',)), @@ -471,7 +476,9 @@ LEXERS = { 'XtlangLexer': ('pygments.lexers.lisp', 'xtlang', ('extempore',), ('*.xtm',), ()), 'YamlJinjaLexer': ('pygments.lexers.templates', 'YAML+Jinja', ('yaml+jinja', 'salt', 'sls'), ('*.sls',), ('text/x-yaml+jinja', 'text/x-sls')), 'YamlLexer': ('pygments.lexers.data', 'YAML', ('yaml',), ('*.yaml', '*.yml'), ('text/x-yaml',)), + 'ZeekLexer': ('pygments.lexers.dsls', 'Zeek', ('zeek', 'bro'), ('*.zeek', '*.bro'), ()), 'ZephirLexer': ('pygments.lexers.php', 'Zephir', ('zephir',), ('*.zep',), ()), + 'ZigLexer': ('pygments.lexers.zig', 'Zig', ('zig',), ('*.zig',), ('text/zig',)), } if __name__ == '__main__': # pragma: no cover diff --git a/pygments/lexers/_mql_builtins.py b/pygments/lexers/_mql_builtins.py index 8c80379..e59fd91 100644 --- a/pygments/lexers/_mql_builtins.py +++ b/pygments/lexers/_mql_builtins.py @@ -884,7 +884,7 @@ constants = ( 'PERIOD_W1', 'POINTER_AUTOMATIC', 'POINTER_DYNAMIC', - 'POINTER_INVALID' + 'POINTER_INVALID', 'PRICE_CLOSE', 'PRICE_HIGH', 'PRICE_LOW', diff --git a/pygments/lexers/asm.py b/pygments/lexers/asm.py index 75d65f9..32ac936 100644 --- a/pygments/lexers/asm.py +++ b/pygments/lexers/asm.py @@ -37,6 +37,7 @@ class GasLexer(RegexLexer): char = r'[\w$.@-]' identifier = r'(?:[a-zA-Z$_]' + char + r'*|\.' 
+ char + '+)' number = r'(?:0[xX][a-zA-Z0-9]+|\d+)' + register = '%' + identifier tokens = { 'root': [ @@ -52,6 +53,7 @@ class GasLexer(RegexLexer): (string, String), ('@' + identifier, Name.Attribute), (number, Number.Integer), + (register, Name.Variable), (r'[\r\n]+', Text, '#pop'), (r'[;#].*?\n', Comment, '#pop'), @@ -72,7 +74,7 @@ class GasLexer(RegexLexer): (identifier, Name.Constant), (number, Number.Integer), # Registers - ('%' + identifier, Name.Variable), + (register, Name.Variable), # Numeric constants ('$'+number, Number.Integer), (r"$'(.|\\')'", String.Char), @@ -455,6 +457,10 @@ class NasmLexer(RegexLexer): filenames = ['*.asm', '*.ASM'] mimetypes = ['text/x-nasm'] + # Tasm uses the same file endings, but TASM is not as common as NASM, so + # we prioritize NASM higher by default + priority = 1.0 + identifier = r'[a-z$._?][\w$.?#@~]*' hexn = r'(?:0x[0-9a-f]+|$0[0-9a-f]*|[0-9]+[0-9a-f]*h)' octn = r'[0-7]+q' @@ -520,6 +526,11 @@ class NasmLexer(RegexLexer): ], } + def analyse_text(text): + # Probably TASM + if re.match(r'PROC', text, re.IGNORECASE): + return False + class NasmObjdumpLexer(ObjdumpLexer): """ @@ -614,6 +625,11 @@ class TasmLexer(RegexLexer): ], } + def analyse_text(text): + # See above + if re.match(r'PROC', text, re.I): + return True + class Ca65Lexer(RegexLexer): """ diff --git a/pygments/lexers/bibtex.py b/pygments/lexers/bibtex.py index 855254e..6d15c23 100644 --- a/pygments/lexers/bibtex.py +++ b/pygments/lexers/bibtex.py @@ -33,7 +33,7 @@ class BibTeXLexer(ExtendedRegexLexer): flags = re.IGNORECASE ALLOWED_CHARS = r'@!$&*+\-./:;<>?\[\\\]^`|~' - IDENTIFIER = '[{0}][{1}]*'.format('a-z_' + ALLOWED_CHARS, r'\w' + ALLOWED_CHARS) + IDENTIFIER = '[{}][{}]*'.format('a-z_' + ALLOWED_CHARS, r'\w' + ALLOWED_CHARS) def open_brace_callback(self, match, ctx): opening_brace = match.group() diff --git a/pygments/lexers/c_cpp.py b/pygments/lexers/c_cpp.py index e676a8a..5d84a37 100644 --- a/pygments/lexers/c_cpp.py +++ b/pygments/lexers/c_cpp.py @@ -144,21 +144,21 @@ class CFamilyLexer(RegexLexer): ] } - stdlib_types = set(( + stdlib_types = { 'size_t', 'ssize_t', 'off_t', 'wchar_t', 'ptrdiff_t', 'sig_atomic_t', 'fpos_t', 'clock_t', 'time_t', 'va_list', 'jmp_buf', 'FILE', 'DIR', 'div_t', 'ldiv_t', - 'mbstate_t', 'wctrans_t', 'wint_t', 'wctype_t')) - c99_types = set(( + 'mbstate_t', 'wctrans_t', 'wint_t', 'wctype_t'} + c99_types = { '_Bool', '_Complex', 'int8_t', 'int16_t', 'int32_t', 'int64_t', 'uint8_t', 'uint16_t', 'uint32_t', 'uint64_t', 'int_least8_t', 'int_least16_t', 'int_least32_t', 'int_least64_t', 'uint_least8_t', 'uint_least16_t', 'uint_least32_t', 'uint_least64_t', 'int_fast8_t', 'int_fast16_t', 'int_fast32_t', 'int_fast64_t', 'uint_fast8_t', 'uint_fast16_t', 'uint_fast32_t', 'uint_fast64_t', - 'intptr_t', 'uintptr_t', 'intmax_t', 'uintmax_t')) - linux_types = set(( + 'intptr_t', 'uintptr_t', 'intmax_t', 'uintmax_t'} + linux_types = { 'clockid_t', 'cpu_set_t', 'cpumask_t', 'dev_t', 'gid_t', 'id_t', 'ino_t', 'key_t', 'mode_t', 'nfds_t', 'pid_t', 'rlim_t', 'sig_t', 'sighandler_t', 'siginfo_t', - 'sigset_t', 'sigval_t', 'socklen_t', 'timer_t', 'uid_t')) + 'sigset_t', 'sigval_t', 'socklen_t', 'timer_t', 'uid_t'} def __init__(self, **options): self.stdlibhighlighting = get_bool_opt(options, 'stdlibhighlighting', True) diff --git a/pygments/lexers/c_like.py b/pygments/lexers/c_like.py index cc88dfb..82dee35 100644 --- a/pygments/lexers/c_like.py +++ b/pygments/lexers/c_like.py @@ -291,23 +291,23 @@ class CudaLexer(CLexer): aliases = ['cuda', 'cu'] mimetypes = 
['text/x-cuda'] - function_qualifiers = set(('__device__', '__global__', '__host__', - '__noinline__', '__forceinline__')) - variable_qualifiers = set(('__device__', '__constant__', '__shared__', - '__restrict__')) - vector_types = set(('char1', 'uchar1', 'char2', 'uchar2', 'char3', 'uchar3', - 'char4', 'uchar4', 'short1', 'ushort1', 'short2', 'ushort2', - 'short3', 'ushort3', 'short4', 'ushort4', 'int1', 'uint1', - 'int2', 'uint2', 'int3', 'uint3', 'int4', 'uint4', 'long1', - 'ulong1', 'long2', 'ulong2', 'long3', 'ulong3', 'long4', - 'ulong4', 'longlong1', 'ulonglong1', 'longlong2', - 'ulonglong2', 'float1', 'float2', 'float3', 'float4', - 'double1', 'double2', 'dim3')) - variables = set(('gridDim', 'blockIdx', 'blockDim', 'threadIdx', 'warpSize')) - functions = set(('__threadfence_block', '__threadfence', '__threadfence_system', - '__syncthreads', '__syncthreads_count', '__syncthreads_and', - '__syncthreads_or')) - execution_confs = set(('<<<', '>>>')) + function_qualifiers = {'__device__', '__global__', '__host__', + '__noinline__', '__forceinline__'} + variable_qualifiers = {'__device__', '__constant__', '__shared__', + '__restrict__'} + vector_types = {'char1', 'uchar1', 'char2', 'uchar2', 'char3', 'uchar3', + 'char4', 'uchar4', 'short1', 'ushort1', 'short2', 'ushort2', + 'short3', 'ushort3', 'short4', 'ushort4', 'int1', 'uint1', + 'int2', 'uint2', 'int3', 'uint3', 'int4', 'uint4', 'long1', + 'ulong1', 'long2', 'ulong2', 'long3', 'ulong3', 'long4', + 'ulong4', 'longlong1', 'ulonglong1', 'longlong2', + 'ulonglong2', 'float1', 'float2', 'float3', 'float4', + 'double1', 'double2', 'dim3'} + variables = {'gridDim', 'blockIdx', 'blockDim', 'threadIdx', 'warpSize'} + functions = {'__threadfence_block', '__threadfence', '__threadfence_system', + '__syncthreads', '__syncthreads_count', '__syncthreads_and', + '__syncthreads_or'} + execution_confs = {'<<<', '>>>'} def get_tokens_unprocessed(self, text): for index, token, value in CLexer.get_tokens_unprocessed(self, text): @@ -352,7 +352,7 @@ class SwigLexer(CppLexer): } # This is a far from complete set of SWIG directives - swig_directives = set(( + swig_directives = { # Most common directives '%apply', '%define', '%director', '%enddef', '%exception', '%extend', '%feature', '%fragment', '%ignore', '%immutable', '%import', '%include', @@ -371,7 +371,7 @@ class SwigLexer(CppLexer): '%pythoncallback', '%pythoncode', '%pythondynamic', '%pythonmaybecall', '%pythonnondynamic', '%pythonprepend', '%refobject', '%shadow', '%sizeof', '%trackobjects', '%types', '%unrefobject', '%varargs', '%warn', - '%warnfilter')) + '%warnfilter'} def analyse_text(text): rv = 0 @@ -429,13 +429,13 @@ class ArduinoLexer(CppLexer): mimetypes = ['text/x-arduino'] # Language sketch main structure functions - structure = set(('setup', 'loop')) + structure = {'setup', 'loop'} # Language operators - operators = set(('not', 'or', 'and', 'xor')) + operators = {'not', 'or', 'and', 'xor'} # Language 'variables' - variables = set(( + variables = { 'DIGITAL_MESSAGE', 'FIRMATA_STRING', 'ANALOG_MESSAGE', 'REPORT_DIGITAL', 'REPORT_ANALOG', 'INPUT_PULLUP', 'SET_PIN_MODE', 'INTERNAL2V56', 'SYSTEM_RESET', 'LED_BUILTIN', 'INTERNAL1V1', 'SYSEX_START', 'INTERNAL', 'EXTERNAL', 'HIGH', @@ -452,10 +452,10 @@ class ArduinoLexer(CppLexer): 'signed', 'inline', 'delete', '_Bool', 'complex', '_Complex', '_Imaginary', 'atomic_bool', 'atomic_char', 'atomic_schar', 'atomic_uchar', 'atomic_short', 'atomic_ushort', 'atomic_int', 'atomic_uint', 'atomic_long', 'atomic_ulong', - 'atomic_llong', 
'atomic_ullong', 'PROGMEM')) + 'atomic_llong', 'atomic_ullong', 'PROGMEM'} # Language shipped functions and class ( ) - functions = set(( + functions = { 'KeyboardController', 'MouseController', 'SoftwareSerial', 'EthernetServer', 'EthernetClient', 'LiquidCrystal', 'RobotControl', 'GSMVoiceCall', 'EthernetUDP', 'EsploraTFT', 'HttpClient', 'RobotMotor', 'WiFiClient', @@ -517,13 +517,13 @@ class ArduinoLexer(CppLexer): 'cos', 'sin', 'pow', 'map', 'abs', 'max', 'min', 'get', 'run', 'put', 'isAlphaNumeric', 'isAlpha', 'isAscii', 'isWhitespace', 'isControl', 'isDigit', 'isGraph', 'isLowerCase', 'isPrintable', 'isPunct', 'isSpace', 'isUpperCase', - 'isHexadecimalDigit')) + 'isHexadecimalDigit'} # do not highlight - suppress_highlight = set(( + suppress_highlight = { 'namespace', 'template', 'mutable', 'using', 'asm', 'typeid', 'typename', 'this', 'alignof', 'constexpr', 'decltype', 'noexcept', - 'static_assert', 'thread_local', 'restrict')) + 'static_assert', 'thread_local', 'restrict'} def get_tokens_unprocessed(self, text): for index, token, value in CppLexer.get_tokens_unprocessed(self, text): diff --git a/pygments/lexers/configs.py b/pygments/lexers/configs.py index 717246a..a18285a 100644 --- a/pygments/lexers/configs.py +++ b/pygments/lexers/configs.py @@ -300,7 +300,7 @@ class ApacheConfLexer(RegexLexer): tokens = { 'root': [ (r'\s+', Text), - (r'(#.*?)$', Comment), + (r'#(.*\\\n)+.*$|(#.*?)$', Comment), (r'(<[^\s>]+)(?:(\s+)(.*))?(>)', bygroups(Name.Tag, Text, String, Name.Tag)), (r'([a-z]\w*)(\s+)', @@ -319,7 +319,7 @@ class ApacheConfLexer(RegexLexer): r'os|productonly|full|emerg|alert|crit|error|warn|' r'notice|info|debug|registry|script|inetd|standalone|' r'user|group)\b', Keyword), - (r'"([^"\\]*(?:\\.[^"\\]*)*)"', String.Double), + (r'"([^"\\]*(?:\\(.|[\n])[^"\\]*)*)"', String.Double), (r'[^\s"\\]+', Text) ], } @@ -540,14 +540,16 @@ class DockerLexer(RegexLexer): filenames = ['Dockerfile', '*.docker'] mimetypes = ['text/x-dockerfile-config'] - _keywords = (r'(?:FROM|MAINTAINER|EXPOSE|WORKDIR|USER|STOPSIGNAL)') + _keywords = (r'(?:MAINTAINER|EXPOSE|WORKDIR|USER|STOPSIGNAL)') _bash_keywords = (r'(?:RUN|CMD|ENTRYPOINT|ENV|ARG|LABEL|ADD|COPY)') - _lb = r'(?:\s*\\?\s*)' # dockerfile line break regex + _lb = r'(?:\s*\\?\s*)' # dockerfile line break regex flags = re.IGNORECASE | re.MULTILINE tokens = { 'root': [ (r'#.*', Comment), + (r'(FROM)([ \t]*)(\S*)([ \t]*)(?:(AS)([ \t]*)(\S*))?', + bygroups(Keyword, Text, String, Text, Keyword, Text, String)), (r'(ONBUILD)(%s)' % (_lb,), bygroups(Keyword, using(BashLexer))), (r'(HEALTHCHECK)((%s--\w+=\w+%s)*)' % (_lb, _lb), bygroups(Keyword, using(BashLexer))), diff --git a/pygments/lexers/csound.py b/pygments/lexers/csound.py index 4f754d4..c35bd94 100644 --- a/pygments/lexers/csound.py +++ b/pygments/lexers/csound.py @@ -35,6 +35,7 @@ class CsoundLexer(RegexLexer): 'preprocessor directives': [ (r'#(?:e(?:nd(?:if)?|lse)\b|##)|@@?[ \t]*\d+', Comment.Preproc), + (r'#includestr', Comment.Preproc, 'includestr directive'), (r'#include', Comment.Preproc, 'include directive'), (r'#[ \t]*define', Comment.Preproc, 'define directive'), (r'#(?:ifn?def|undef)\b', Comment.Preproc, 'macro directive') @@ -44,6 +45,10 @@ class CsoundLexer(RegexLexer): include('whitespace'), (r'([^ \t]).*?\1', String, '#pop') ], + 'includestr directive': [ + include('whitespace'), + (r'"', String, ('#pop', 'quoted string')) + ], 'define directive': [ (r'\n', Text), @@ -114,6 +119,13 @@ class CsoundLexer(RegexLexer): (r'\d+', Number.Integer) ], + 'quoted string': [ + (r'"', String, 
'#pop'), + (r'[^"$]+', String), + include('macro uses'), + (r'[$]', String) + ], + 'braced string': [ # Do nothing. This must be defined in subclasses. ] @@ -122,7 +134,7 @@ class CsoundLexer(RegexLexer): class CsoundScoreLexer(CsoundLexer): """ - For `Csound `_ scores. + For `Csound `_ scores. .. versionadded:: 2.1 """ @@ -144,7 +156,7 @@ class CsoundScoreLexer(CsoundLexer): (r'z', Keyword.Constant), # z is a constant equal to 800,000,000,000. 800 billion seconds is about # 25,367.8 years. See also - # https://csound.github.io/docs/manual/ScoreTop.html and + # https://csound.com/docs/manual/ScoreTop.html and # https://github.com/csound/csound/search?q=stof+path%3AEngine+filename%3Asread.c. (r'([nNpP][pP])(\d+)', bygroups(Keyword, Number.Integer)), @@ -164,13 +176,6 @@ class CsoundScoreLexer(CsoundLexer): (r'\n', Text, '#pop') ], - 'quoted string': [ - (r'"', String, '#pop'), - (r'[^"$]+', String), - include('macro uses'), - (r'[$]', String) - ], - 'loop after left brace': [ include('whitespace and macro uses'), (r'\d+', Number.Integer, ('#pop', 'loop after repeat count')), @@ -195,7 +200,7 @@ class CsoundScoreLexer(CsoundLexer): class CsoundOrchestraLexer(CsoundLexer): """ - For `Csound `_ orchestras. + For `Csound `_ orchestras. .. versionadded:: 2.1 """ @@ -212,28 +217,25 @@ class CsoundOrchestraLexer(CsoundLexer): yield match.start(), Name.Function, opcode def name_callback(lexer, match): + type_annotation_token = Keyword.Type + name = match.group(1) if name in OPCODES or name in DEPRECATED_OPCODES: yield match.start(), Name.Builtin, name - if match.group(2): - yield match.start(2), Punctuation, match.group(2) - yield match.start(3), Keyword.Type, match.group(3) elif name in lexer.user_defined_opcodes: yield match.start(), Name.Function, name else: - nameMatch = re.search(r'^(g?[afikSw])(\w+)', name) - if nameMatch: - yield nameMatch.start(1), Keyword.Type, nameMatch.group(1) - yield nameMatch.start(2), Name, nameMatch.group(2) + type_annotation_token = Name + name_match = re.search(r'^(g?[afikSw])(\w+)', name) + if name_match: + yield name_match.start(1), Keyword.Type, name_match.group(1) + yield name_match.start(2), Name, name_match.group(2) else: yield match.start(), Name, name - # If there's a trailing :V, for example, we want to keep this around - # and emit it as well, otherwise this lexer will not pass round-trip - # testing - if match.group(2): - yield match.start(2), Punctuation, match.group(2) - yield match.start(3), Name, match.group(3) + if match.group(2): + yield match.start(2), Punctuation, match.group(2) + yield match.start(3), type_annotation_token, match.group(3) tokens = { 'root': [ @@ -328,13 +330,13 @@ class CsoundOrchestraLexer(CsoundLexer): (r'\\(?:[\\abnrt"]|[0-7]{1,3})', String.Escape) ], # Format specifiers are highlighted in all strings, even though only - # fprintks https://csound.github.io/docs/manual/fprintks.html - # fprints https://csound.github.io/docs/manual/fprints.html - # printf/printf_i https://csound.github.io/docs/manual/printf.html - # printks https://csound.github.io/docs/manual/printks.html - # prints https://csound.github.io/docs/manual/prints.html - # sprintf https://csound.github.io/docs/manual/sprintf.html - # sprintfk https://csound.github.io/docs/manual/sprintfk.html + # fprintks https://csound.com/docs/manual/fprintks.html + # fprints https://csound.com/docs/manual/fprints.html + # printf/printf_i https://csound.com/docs/manual/printf.html + # printks https://csound.com/docs/manual/printks.html + # prints 
https://csound.com/docs/manual/prints.html + # sprintf https://csound.com/docs/manual/sprintf.html + # sprintfk https://csound.com/docs/manual/sprintfk.html # work with strings that contain format specifiers. In addition, these # opcodes’ handling of format specifiers is inconsistent: # - fprintks, fprints, printks, and prints do accept %a and %A @@ -371,6 +373,7 @@ class CsoundOrchestraLexer(CsoundLexer): 'Csound score opcode': [ include('whitespace and macro uses'), + (r'"', String, 'quoted string'), (r'\{\{', String, 'Csound score'), (r'\n', Text, '#pop') ], @@ -381,6 +384,7 @@ class CsoundOrchestraLexer(CsoundLexer): 'Python opcode': [ include('whitespace and macro uses'), + (r'"', String, 'quoted string'), (r'\{\{', String, 'Python'), (r'\n', Text, '#pop') ], @@ -391,6 +395,7 @@ class CsoundOrchestraLexer(CsoundLexer): 'Lua opcode': [ include('whitespace and macro uses'), + (r'"', String, 'quoted string'), (r'\{\{', String, 'Lua'), (r'\n', Text, '#pop') ], @@ -403,7 +408,7 @@ class CsoundOrchestraLexer(CsoundLexer): class CsoundDocumentLexer(RegexLexer): """ - For `Csound `_ documents. + For `Csound `_ documents. .. versionadded:: 2.1 """ diff --git a/pygments/lexers/data.py b/pygments/lexers/data.py index b325354..46ca734 100644 --- a/pygments/lexers/data.py +++ b/pygments/lexers/data.py @@ -233,7 +233,7 @@ class YamlLexer(ExtendedRegexLexer): # whitespaces separating tokens (r'[ ]+', Text), # key with colon - (r'''([^,:?\[\]{}"'\n]+)(:)(?=[ ]|$)''', + (r'''([^#,:?\[\]{}"'\n]+)(:)(?=[ ]|$)''', bygroups(Name.Tag, set_indent(Punctuation, implicit=True))), # tags, anchors and aliases, include('descriptors'), diff --git a/pygments/lexers/dsls.py b/pygments/lexers/dsls.py index 38a805e..0af3c6c 100644 --- a/pygments/lexers/dsls.py +++ b/pygments/lexers/dsls.py @@ -16,7 +16,7 @@ from pygments.lexer import ExtendedRegexLexer, RegexLexer, bygroups, words, \ from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Number, Punctuation, Literal, Whitespace -__all__ = ['ProtoBufLexer', 'BroLexer', 'PuppetLexer', 'RslLexer', +__all__ = ['ProtoBufLexer', 'ZeekLexer', 'PuppetLexer', 'RslLexer', 'MscgenLexer', 'VGLLexer', 'AlloyLexer', 'PanLexer', 'CrmshLexer', 'ThriftLexer', 'FlatlineLexer', 'SnowballLexer'] @@ -40,9 +40,9 @@ class ProtoBufLexer(RegexLexer): (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single), (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline), (words(( - 'import', 'option', 'optional', 'required', 'repeated', 'default', - 'packed', 'ctype', 'extensions', 'to', 'max', 'rpc', 'returns', - 'oneof'), prefix=r'\b', suffix=r'\b'), + 'import', 'option', 'optional', 'required', 'repeated', + 'reserved', 'default', 'packed', 'ctype', 'extensions', 'to', + 'max', 'rpc', 'returns', 'oneof'), prefix=r'\b', suffix=r'\b'), Keyword), (words(( 'int32', 'int64', 'uint32', 'uint64', 'sint32', 'sint64', @@ -188,84 +188,166 @@ class ThriftLexer(RegexLexer): } -class BroLexer(RegexLexer): +class ZeekLexer(RegexLexer): """ - For `Bro `_ scripts. + For `Zeek `_ scripts. - .. versionadded:: 1.5 + .. versionadded:: 2.5 """ - name = 'Bro' - aliases = ['bro'] - filenames = ['*.bro'] + name = 'Zeek' + aliases = ['zeek', 'bro'] + filenames = ['*.zeek', '*.bro'] - _hex = r'[0-9a-fA-F_]' + _hex = r'[0-9a-fA-F]' _float = r'((\d*\.?\d+)|(\d+\.?\d*))([eE][-+]?\d+)?' 
_h = r'[A-Za-z0-9][-A-Za-z0-9]*' tokens = { 'root': [ - # Whitespace - (r'^@.*?\n', Comment.Preproc), - (r'#.*?\n', Comment.Single), + include('whitespace'), + include('comments'), + include('directives'), + include('attributes'), + include('types'), + include('keywords'), + include('literals'), + include('operators'), + include('punctuation'), + (r'((?:[A-Za-z_]\w*)(?:::(?:[A-Za-z_]\w*))*)(?=\s*\()', + Name.Function), + include('identifiers'), + ], + + 'whitespace': [ (r'\n', Text), (r'\s+', Text), (r'\\\n', Text), - # Keywords - (r'(add|alarm|break|case|const|continue|delete|do|else|enum|event' - r'|export|for|function|if|global|hook|local|module|next' - r'|of|print|redef|return|schedule|switch|type|when|while)\b', Keyword), - (r'(addr|any|bool|count|counter|double|file|int|interval|net' - r'|pattern|port|record|set|string|subnet|table|time|timer' - r'|vector)\b', Keyword.Type), + ], + + 'comments': [ + (r'#.*$', Comment), + ], + + 'directives': [ + (r'@(load-plugin|load-sigs|load|unload)\b.*$', Comment.Preproc), + (r'@(DEBUG|DIR|FILENAME|deprecated|if|ifdef|ifndef|else|endif)\b', Comment.Preproc), + (r'(@prefixes)\s*(\+?=).*$', Comment.Preproc), + ], + + 'attributes': [ + (words(('redef', 'priority', 'log', 'optional', 'default', 'add_func', + 'delete_func', 'expire_func', 'read_expire', 'write_expire', + 'create_expire', 'synchronized', 'persistent', 'rotate_interval', + 'rotate_size', 'encrypt', 'raw_output', 'mergeable', 'error_handler', + 'type_column', 'deprecated'), + prefix=r'&', suffix=r'\b'), + Keyword.Pseudo), + ], + + 'types': [ + (words(('any', + 'enum', 'record', 'set', 'table', 'vector', + 'function', 'hook', 'event', + 'addr', 'bool', 'count', 'double', 'file', 'int', 'interval', + 'pattern', 'port', 'string', 'subnet', 'time'), + suffix=r'\b'), + Keyword.Type), + + (r'(opaque)(\s+)(of)(\s+)((?:[A-Za-z_]\w*)(?:::(?:[A-Za-z_]\w*))*)\b', + bygroups(Keyword.Type, Text, Operator.Word, Text, Keyword.Type)), + + (r'(type)(\s+)((?:[A-Za-z_]\w*)(?:::(?:[A-Za-z_]\w*))*)(\s*)(:)(\s*)\b(record|enum)\b', + bygroups(Keyword, Text, Name.Class, Text, Operator, Text, Keyword.Type)), + + (r'(type)(\s+)((?:[A-Za-z_]\w*)(?:::(?:[A-Za-z_]\w*))*)(\s*)(:)', + bygroups(Keyword, Text, Name, Text, Operator)), + + (r'(redef)(\s+)(record|enum)(\s+)((?:[A-Za-z_]\w*)(?:::(?:[A-Za-z_]\w*))*)\b', + bygroups(Keyword, Text, Keyword.Type, Text, Name.Class)), + ], + + 'keywords': [ + (words(('redef', 'export', 'if', 'else', 'for', 'while', + 'return', 'break', 'next', 'continue', 'fallthrough', + 'switch', 'default', 'case', + 'add', 'delete', + 'when', 'timeout', 'schedule'), + suffix=r'\b'), + Keyword), + (r'(print)\b', Keyword), + (r'(global|local|const|option)\b', Keyword.Declaration), + (r'(module)(\s+)(([A-Za-z_]\w*)(?:::([A-Za-z_]\w*))*)\b', + bygroups(Keyword.Namespace, Text, Name.Namespace)), + ], + + 'literals': [ + (r'"', String, 'string'), + + # Not the greatest match for patterns, but generally helps + # disambiguate between start of a pattern and just a division + # operator. 
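+            # (Editorial illustration, not part of the upstream change:
+            # in `p = /foo|bar/` the first `/` is followed by another `/`
+            # later on the same line, so it opens a pattern, while in
+            # `x = y / 2` no second `/` follows and the `/` stays a
+            # division operator.)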
+ (r'/(?=.*/)', String.Regex, 'regex'), + (r'(T|F)\b', Keyword.Constant), - (r'(&)((?:add|delete|expire)_func|attr|(?:create|read|write)_expire' - r'|default|disable_print_hook|raw_output|encrypt|group|log' - r'|mergeable|optional|persistent|priority|redef' - r'|rotate_(?:interval|size)|synchronized)\b', - bygroups(Punctuation, Keyword)), - (r'\s+module\b', Keyword.Namespace), - # Addresses, ports and networks - (r'\d+/(tcp|udp|icmp|unknown)\b', Number), - (r'(\d+\.){3}\d+', Number), - (r'(' + _hex + r'){7}' + _hex, Number), - (r'0x' + _hex + r'(' + _hex + r'|:)*::(' + _hex + r'|:)*', Number), - (r'((\d+|:)(' + _hex + r'|:)*)?::(' + _hex + r'|:)*', Number), - (r'(\d+\.\d+\.|(\d+\.){2}\d+)', Number), + + # Port + (r'\d{1,5}/(udp|tcp|icmp|unknown)\b', Number), + + # IPv4 Address + (r'(\d{1,3}.){3}(\d{1,3})\b', Number), + + # IPv6 Address + (r'\[([0-9a-fA-F]{0,4}:){2,7}([0-9a-fA-F]{0,4})?((\d{1,3}.){3}(\d{1,3}))?\]', Number), + + # Numeric + (r'0[xX]' + _hex + r'+\b', Number.Hex), + (_float + r'\s*(day|hr|min|sec|msec|usec)s?\b', Number.Float), + (_float + r'\b', Number.Float), + (r'(\d+)\b', Number.Integer), + # Hostnames (_h + r'(\.' + _h + r')+', String), - # Numeric - (_float + r'\s+(day|hr|min|sec|msec|usec)s?\b', Literal.Date), - (r'0[xX]' + _hex, Number.Hex), - (_float, Number.Float), - (r'\d+', Number.Integer), - (r'/', String.Regex, 'regex'), - (r'"', String, 'string'), - # Operators - (r'[!%*/+:<=>?~|-]', Operator), + ], + + 'operators': [ + (r'[!%*/+<=>~|&^-]', Operator), (r'([-+=&|]{2}|[+=!><-]=)', Operator), - (r'(in|match)\b', Operator.Word), - (r'[{}()\[\]$.,;]', Punctuation), - # Identfier - (r'([_a-zA-Z]\w*)(::)', bygroups(Name, Name.Namespace)), + (r'(in|as|is|of)\b', Operator.Word), + (r'\??\$', Operator), + ], + + 'punctuation': [ + (r'[{}()\[\],;.]', Punctuation), + # The "ternary if", which uses '?' and ':', could instead be + # treated as an Operator, but colons are more frequently used to + # separate field/identifier names from their types, so the (often) + # less-prominent Punctuation is used even with '?' for consistency. + (r'[?:]', Punctuation), + ], + + 'identifiers': [ + (r'([a-zA-Z_]\w*)(::)', bygroups(Name, Punctuation)), (r'[a-zA-Z_]\w*', Name) ], + 'string': [ + (r'\\.', String.Escape), + (r'%-?[0-9]*(\.[0-9]+)?[DTdxsefg]', String.Escape), (r'"', String, '#pop'), - (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), - (r'[^\\"\n]+', String), - (r'\\\n', String), - (r'\\', String) + (r'.', String), ], + 'regex': [ + (r'\\.', String.Escape), (r'/', String.Regex, '#pop'), - (r'\\[\\nt/]', String.Regex), # String.Escape is too intense here. - (r'[^\\/\n]+', String.Regex), - (r'\\\n', String.Regex), - (r'\\', String.Regex) - ] + (r'.', String.Regex), + ], } +BroLexer = ZeekLexer + + class PuppetLexer(RegexLexer): """ For `Puppet `__ configuration DSL. 
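The dsls.py hunks above turn the old ``BroLexer`` into ``ZeekLexer`` and keep
``BroLexer`` as a module-level alias, so the legacy ``bro`` alias and ``*.bro``
filenames continue to resolve next to the new ``zeek`` and ``*.zeek`` ones. A
minimal usage sketch of the two lookups (editorial example, not part of the
upstream diff; the Zeek snippet is made up):

    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    from pygments.lexers import get_lexer_by_name

    code = 'event zeek_init() { print "hello"; }'

    # Both the new and the legacy alias resolve to the same class.
    for alias in ('zeek', 'bro'):
        lexer = get_lexer_by_name(alias)
        print(type(lexer).__name__)     # ZeekLexer in both cases
        print(highlight(code, lexer, TerminalFormatter()))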
diff --git a/pygments/lexers/dylan.py b/pygments/lexers/dylan.py
index b9a13e2..dd972bf 100644
--- a/pygments/lexers/dylan.py
+++ b/pygments/lexers/dylan.py
@@ -32,27 +32,27 @@ class DylanLexer(RegexLexer):

     flags = re.IGNORECASE

-    builtins = set((
+    builtins = {
         'subclass', 'abstract', 'block', 'concrete', 'constant', 'class',
         'compiler-open', 'compiler-sideways', 'domain', 'dynamic',
         'each-subclass', 'exception', 'exclude', 'function', 'generic',
         'handler', 'inherited', 'inline', 'inline-only', 'instance',
         'interface', 'import', 'keyword', 'library', 'macro', 'method',
         'module', 'open', 'primary', 'required', 'sealed', 'sideways',
-        'singleton', 'slot', 'thread', 'variable', 'virtual'))
+        'singleton', 'slot', 'thread', 'variable', 'virtual'}

-    keywords = set((
+    keywords = {
         'above', 'afterwards', 'begin', 'below', 'by', 'case', 'cleanup',
         'create', 'define', 'else', 'elseif', 'end', 'export', 'finally',
         'for', 'from', 'if', 'in', 'let', 'local', 'otherwise', 'rename',
         'select', 'signal', 'then', 'to', 'unless', 'until', 'use', 'when',
-        'while'))
+        'while'}

-    operators = set((
+    operators = {
         '~', '+', '-', '*', '|', '^', '=', '==', '~=', '~==', '<', '<=',
-        '>', '>=', '&', '|'))
+        '>', '>=', '&', '|'}

-    functions = set((
+    functions = {
         'abort', 'abs', 'add', 'add!', 'add-method', 'add-new', 'add-new!',
         'all-superclasses', 'always', 'any?', 'applicable-method?', 'apply',
         'aref', 'aref-setter', 'as', 'as-lowercase', 'as-lowercase!',
@@ -86,7 +86,7 @@ class DylanLexer(RegexLexer):
         'subtype?', 'table-protocol', 'tail', 'tail-setter', 'third',
         'third-setter', 'truncate', 'truncate/', 'type-error-expected-type',
         'type-error-value', 'type-for-copy', 'type-union', 'union', 'values',
-        'vector', 'zero?'))
+        'vector', 'zero?'}

     valid_name = '\\\\?[\\w!&*<>|^$%@\\-+~?/=]+'

diff --git a/pygments/lexers/elm.py b/pygments/lexers/elm.py
index 86dafc7..ee941d7 100644
--- a/pygments/lexers/elm.py
+++ b/pygments/lexers/elm.py
@@ -77,7 +77,7 @@ class ElmLexer(RegexLexer):
             (words((builtinOps), prefix=r'\(', suffix=r'\)'), Name.Function),

             # Infix Operators
-            (words((builtinOps)), Name.Function),
+            (words(builtinOps), Name.Function),

             # Numbers
             include('numbers'),
diff --git a/pygments/lexers/email.py b/pygments/lexers/email.py
new file mode 100644
index 0000000..5ad225b
--- /dev/null
+++ b/pygments/lexers/email.py
@@ -0,0 +1,154 @@
+# -*- coding: utf-8 -*-
+"""
+    pygments.lexers.email
+    ~~~~~~~~~~~~~~~~~~~~~
+
+    Lexer for raw e-mail.
+
+    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, DelegatingLexer, bygroups
+from pygments.lexers.mime import MIMELexer
+from pygments.token import Text, Keyword, Name, String, Number, Comment
+from pygments.util import get_bool_opt
+
+__all__ = ["EmailLexer"]
+
+
+class EmailHeaderLexer(RegexLexer):
+    """
+    Sub-lexer for raw e-mail. This lexer only processes the header part of
+    an e-mail.
+
+    .. 
versionadded:: 2.5 + """ + + def __init__(self, **options): + super(EmailHeaderLexer, self).__init__(**options) + self.highlight_x = get_bool_opt(options, "highlight-X-header", False) + + def get_x_header_tokens(self, match): + if self.highlight_x: + # field + yield match.start(1), Name.Tag, match.group(1) + + # content + default_actions = self.get_tokens_unprocessed( + match.group(2), stack=("root", "header")) + for item in default_actions: + yield item + else: + # lowlight + yield match.start(1), Comment.Special, match.group(1) + yield match.start(2), Comment.Multiline, match.group(2) + + tokens = { + "root": [ + (r"^(?:[A-WYZ]|X400)[\w\-]*:", Name.Tag, "header"), + (r"^(X-(?:\w[\w\-]*:))([\s\S]*?\n)(?![ \t])", get_x_header_tokens), + ], + "header": [ + # folding + (r"\n[ \t]", Text.Whitespace), + (r"\n(?![ \t])", Text.Whitespace, "#pop"), + + # keywords + (r"\bE?SMTPS?\b", Keyword), + (r"\b(?:HE|EH)LO\b", Keyword), + + # mailbox + (r"[\w\.\-\+=]+@[\w\.\-]+", Name.Label), + (r"<[\w\.\-\+=]+@[\w\.\-]+>", Name.Label), + + # domain + (r"\b(\w[\w\.-]*\.[\w\.-]*\w[a-zA-Z]+)\b", Name.Function), + + # IPv4 + ( + r"(?<=\b)(?:(?:25[0-5]|2[0-4][0-9]|1?[0-9][0-9]?)\.){3}(?:25[0" + r"-5]|2[0-4][0-9]|1?[0-9][0-9]?)(?=\b)", + Number.Integer, + ), + + # IPv6 + (r"(?<=\b)([0-9a-fA-F]{1,4}:){1,7}:(?!\b)", Number.Hex), + (r"(?<=\b):((:[0-9a-fA-F]{1,4}){1,7}|:)(?=\b)", Number.Hex), + (r"(?<=\b)([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}(?=\b)", Number.Hex), + (r"(?<=\b)([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}(?=\b)", Number.Hex), + (r"(?<=\b)[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})(?=\b)", Number.Hex), + (r"(?<=\b)fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}(?=\b)", Number.Hex), + (r"(?<=\b)([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}(?=\b)", Number.Hex), + (r"(?<=\b)([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}(?=\b)", + Number.Hex), + (r"(?<=\b)([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}(?=\b)", + Number.Hex), + (r"(?<=\b)([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}(?=\b)", + Number.Hex), + ( + r"(?<=\b)::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}" + r"[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}" + r"[0-9])(?=\b)", + Number.Hex, + ), + ( + r"(?<=\b)([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-" + r"9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-" + r"9])(?=\b)", + Number.Hex, + ), + + # Date time + ( + r"(?:(Sun|Mon|Tue|Wed|Thu|Fri|Sat),\s+)?(0[1-9]|[1-2]?[0-9]|3[" + r"01])\s+(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s+(" + r"19[0-9]{2}|[2-9][0-9]{3})\s+(2[0-3]|[0-1][0-9]):([0-5][0-9])" + r"(?::(60|[0-5][0-9]))?(?:\.\d{1,5})?\s+([-\+][0-9]{2}[0-5][0-" + r"9]|\(?(?:UTC?|GMT|(?:E|C|M|P)(?:ST|ET|DT)|[A-IK-Z])\)?)", + Name.Decorator, + ), + + # RFC-2047 encoded string + ( + r"(=\?)([\w-]+)(\?)([BbQq])(\?)([\[\w!\"#$%&\'()*+,-./:;<=>@[\\" + r"\]^_`{|}~]+)(\?=)", + bygroups( + String.Affix, + Name.Constant, + String.Affix, + Keyword.Constant, + String.Affix, + Number.Hex, + String.Affix + ) + ), + + # others + (r'[\s]+', Text.Whitespace), + (r'[\S]', Text), + ], + } + + +class EmailLexer(DelegatingLexer): + """ + Lexer for raw E-mail. + + Additional options accepted: + + `highlight-X-header` + Highlight the fields of ``X-`` user-defined email header. (default: + ``False``). + + .. 
versionadded:: 2.5 + """ + + name = "E-mail" + aliases = ["email", "eml"] + filenames = ["*.eml"] + mimetypes = ["message/rfc822"] + + def __init__(self, **options): + super(EmailLexer, self).__init__( + EmailHeaderLexer, MIMELexer, Comment, **options + ) diff --git a/pygments/lexers/erlang.py b/pygments/lexers/erlang.py index 3d9b0fd..07a46c8 100644 --- a/pygments/lexers/erlang.py +++ b/pygments/lexers/erlang.py @@ -163,7 +163,7 @@ class ErlangShellLexer(Lexer): filenames = ['*.erl-sh'] mimetypes = ['text/x-erl-shellsession'] - _prompt_re = re.compile(r'\d+>(?=\s|\Z)') + _prompt_re = re.compile(r'(?:\([\w@_.]+\))?\d+>(?=\s|\Z)') def get_tokens_unprocessed(self, text): erlexer = ErlangLexer(**self.options) @@ -495,7 +495,7 @@ class ElixirConsoleLexer(Lexer): aliases = ['iex'] mimetypes = ['text/x-elixir-shellsession'] - _prompt_re = re.compile(r'(iex|\.{3})(\(\d+\))?> ') + _prompt_re = re.compile(r'(iex|\.{3})((?:\([\w@_.]+\))?\d+|\(\d+\))?> ') def get_tokens_unprocessed(self, text): exlexer = ElixirLexer(**self.options) diff --git a/pygments/lexers/freefem.py b/pygments/lexers/freefem.py index c43b285..3e9ac8e 100644 --- a/pygments/lexers/freefem.py +++ b/pygments/lexers/freefem.py @@ -36,27 +36,27 @@ class FreeFemLexer(CppLexer): mimetypes = ['text/x-freefem'] # Language operators - operators = set(('+', '-', '*', '.*', '/', './', '%', '^', '^-1', ':', '\'')) + operators = {'+', '-', '*', '.*', '/', './', '%', '^', '^-1', ':', '\''} # types - types = set(('bool', 'border', 'complex', 'dmatrix', 'fespace', 'func', 'gslspline', - 'ifstream', 'int', 'macro', 'matrix', 'mesh', 'mesh3', 'mpiComm', - 'mpiGroup', 'mpiRequest', 'NewMacro', 'EndMacro', 'ofstream', 'Pmmap', - 'problem', 'Psemaphore', 'real', 'solve', 'string', 'varf')) + types = {'bool', 'border', 'complex', 'dmatrix', 'fespace', 'func', 'gslspline', + 'ifstream', 'int', 'macro', 'matrix', 'mesh', 'mesh3', 'mpiComm', + 'mpiGroup', 'mpiRequest', 'NewMacro', 'EndMacro', 'ofstream', 'Pmmap', + 'problem', 'Psemaphore', 'real', 'solve', 'string', 'varf'} # finite element spaces - fespaces = set(('BDM1', 'BDM1Ortho', 'Edge03d', 'Edge13d', 'Edge23d', 'FEQF', 'HCT', - 'P0', 'P03d', 'P0Edge', 'P1', 'P13d', 'P1b', 'P1b3d', 'P1bl', 'P1bl3d', - 'P1dc', 'P1Edge', 'P1nc', 'P2', 'P23d', 'P2b', 'P2BR', 'P2dc', 'P2Edge', - 'P2h', 'P2Morley', 'P2pnc', 'P3', 'P3dc', 'P3Edge', 'P4', 'P4dc', - 'P4Edge', 'P5Edge', 'RT0', 'RT03d', 'RT0Ortho', 'RT1', 'RT1Ortho', - 'RT2', 'RT2Ortho')) + fespaces = {'BDM1', 'BDM1Ortho', 'Edge03d', 'Edge13d', 'Edge23d', 'FEQF', 'HCT', + 'P0', 'P03d', 'P0Edge', 'P1', 'P13d', 'P1b', 'P1b3d', 'P1bl', 'P1bl3d', + 'P1dc', 'P1Edge', 'P1nc', 'P2', 'P23d', 'P2b', 'P2BR', 'P2dc', 'P2Edge', + 'P2h', 'P2Morley', 'P2pnc', 'P3', 'P3dc', 'P3Edge', 'P4', 'P4dc', + 'P4Edge', 'P5Edge', 'RT0', 'RT03d', 'RT0Ortho', 'RT1', 'RT1Ortho', + 'RT2', 'RT2Ortho'} # preprocessor - preprocessor = set(('ENDIFMACRO', 'include', 'IFMACRO', 'load')) + preprocessor = {'ENDIFMACRO', 'include', 'IFMACRO', 'load'} # Language keywords - keywords = set(( + keywords = { 'adj', 'append', 'area', @@ -169,10 +169,10 @@ class FreeFemLexer(CppLexer): 'x', 'y', 'z' - )) + } # Language shipped functions and class ( ) - functions = set(( + functions = { 'abs', 'acos', 'acosh', @@ -702,10 +702,10 @@ class FreeFemLexer(CppLexer): 'y0', 'y1', 'yn' - )) + } # function parameters - parameters = set(( + parameters = { 'A', 'A1', 'abserror', @@ -849,13 +849,13 @@ class FreeFemLexer(CppLexer): 'WindowIndex', 'which', 'zbound' - )) + } # deprecated - deprecated = 
set(('fixeborder',)) + deprecated = {'fixeborder'} # do not highlight - suppress_highlight = set(( + suppress_highlight = { 'alignof', 'asm', 'constexpr', @@ -874,7 +874,7 @@ class FreeFemLexer(CppLexer): 'typeid', 'typename', 'using' - )) + } def get_tokens_unprocessed(self, text): for index, token, value in CppLexer.get_tokens_unprocessed(self, text): diff --git a/pygments/lexers/haskell.py b/pygments/lexers/haskell.py index d9eecaa..0c0917e 100644 --- a/pygments/lexers/haskell.py +++ b/pygments/lexers/haskell.py @@ -325,10 +325,10 @@ class AgdaLexer(RegexLexer): # Identifiers (r'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved), (r'(import|module)(\s+)', bygroups(Keyword.Reserved, Text), 'module'), - (r'\b(Set|Prop)\b', Keyword.Type), + (u'\\b(Set|Prop)[\u2080-\u2089]*\\b', Keyword.Type), # Special Symbols (r'(\(|\)|\{|\})', Operator), - (u'(\\.{1,3}|\\||\u039B|\u2200|\u2192|:|=|->)', Operator.Word), + (u'(\\.{1,3}|\\||\u03BB|\u2200|\u2192|:|=|->)', Operator.Word), # Numbers (r'\d+[eE][+-]?\d+', Number.Float), (r'\d+\.\d+([eE][+-]?\d+)?', Number.Float), @@ -481,10 +481,10 @@ class CryptolLexer(RegexLexer): ], } - EXTRA_KEYWORDS = set(('join', 'split', 'reverse', 'transpose', 'width', - 'length', 'tail', '<<', '>>', '<<<', '>>>', 'const', - 'reg', 'par', 'seq', 'ASSERT', 'undefined', 'error', - 'trace')) + EXTRA_KEYWORDS = {'join', 'split', 'reverse', 'transpose', 'width', + 'length', 'tail', '<<', '>>', '<<<', '>>>', 'const', + 'reg', 'par', 'seq', 'ASSERT', 'undefined', 'error', + 'trace'} def get_tokens_unprocessed(self, text): stack = ['root'] diff --git a/pygments/lexers/haxe.py b/pygments/lexers/haxe.py index 38578e5..b357508 100644 --- a/pygments/lexers/haxe.py +++ b/pygments/lexers/haxe.py @@ -79,7 +79,7 @@ class HaxeLexer(ExtendedRegexLexer): if proc in ['error']: ctx.stack.append('preproc-error') - yield match.start(), Comment.Preproc, '#' + proc + yield match.start(), Comment.Preproc, u'#' + proc ctx.pos = match.end() tokens = { diff --git a/pygments/lexers/hdl.py b/pygments/lexers/hdl.py index e7c7617..b45654e 100644 --- a/pygments/lexers/hdl.py +++ b/pygments/lexers/hdl.py @@ -131,15 +131,6 @@ class VerilogLexer(RegexLexer): ] } - def get_tokens_unprocessed(self, text): - for index, token, value in \ - RegexLexer.get_tokens_unprocessed(self, text): - # Convention: mark all upper case names as constants - if token is Name: - if value.isupper(): - token = Name.Constant - yield index, token, value - class SystemVerilogLexer(RegexLexer): """ @@ -184,63 +175,75 @@ class SystemVerilogLexer(RegexLexer): (r'`[a-zA-Z_]\w*', Name.Constant), (words(( - 'accept_on', 'alias', 'always', 'always_comb', 'always_ff', 'always_latch', - 'and', 'assert', 'assign', 'assume', 'automatic', 'before', 'begin', 'bind', 'bins', - 'binsof', 'bit', 'break', 'buf', 'bufif0', 'bufif1', 'byte', 'case', 'casex', 'casez', - 'cell', 'chandle', 'checker', 'class', 'clocking', 'cmos', 'config', 'const', 'constraint', - 'context', 'continue', 'cover', 'covergroup', 'coverpoint', 'cross', 'deassign', - 'default', 'defparam', 'design', 'disable', 'dist', 'do', 'edge', 'else', 'end', 'endcase', - 'endchecker', 'endclass', 'endclocking', 'endconfig', 'endfunction', 'endgenerate', - 'endgroup', 'endinterface', 'endmodule', 'endpackage', 'endprimitive', - 'endprogram', 'endproperty', 'endsequence', 'endspecify', 'endtable', - 'endtask', 'enum', 'event', 'eventually', 'expect', 'export', 'extends', 'extern', - 'final', 'first_match', 'for', 'force', 'foreach', 'forever', 'fork', 'forkjoin', - 'function', 
'generate', 'genvar', 'global', 'highz0', 'highz1', 'if', 'iff', 'ifnone', - 'ignore_bins', 'illegal_bins', 'implies', 'import', 'incdir', 'include', - 'initial', 'inout', 'input', 'inside', 'instance', 'int', 'integer', 'interface', - 'intersect', 'join', 'join_any', 'join_none', 'large', 'let', 'liblist', 'library', - 'local', 'localparam', 'logic', 'longint', 'macromodule', 'matches', 'medium', - 'modport', 'module', 'nand', 'negedge', 'new', 'nexttime', 'nmos', 'nor', 'noshowcancelled', - 'not', 'notif0', 'notif1', 'null', 'or', 'output', 'package', 'packed', 'parameter', - 'pmos', 'posedge', 'primitive', 'priority', 'program', 'property', 'protected', - 'pull0', 'pull1', 'pulldown', 'pullup', 'pulsestyle_ondetect', 'pulsestyle_onevent', - 'pure', 'rand', 'randc', 'randcase', 'randsequence', 'rcmos', 'real', 'realtime', - 'ref', 'reg', 'reject_on', 'release', 'repeat', 'restrict', 'return', 'rnmos', - 'rpmos', 'rtran', 'rtranif0', 'rtranif1', 's_always', 's_eventually', 's_nexttime', - 's_until', 's_until_with', 'scalared', 'sequence', 'shortint', 'shortreal', - 'showcancelled', 'signed', 'small', 'solve', 'specify', 'specparam', 'static', - 'string', 'strong', 'strong0', 'strong1', 'struct', 'super', 'supply0', 'supply1', - 'sync_accept_on', 'sync_reject_on', 'table', 'tagged', 'task', 'this', 'throughout', - 'time', 'timeprecision', 'timeunit', 'tran', 'tranif0', 'tranif1', 'tri', 'tri0', - 'tri1', 'triand', 'trior', 'trireg', 'type', 'typedef', 'union', 'unique', 'unique0', - 'unsigned', 'until', 'until_with', 'untyped', 'use', 'uwire', 'var', 'vectored', - 'virtual', 'void', 'wait', 'wait_order', 'wand', 'weak', 'weak0', 'weak1', 'while', - 'wildcard', 'wire', 'with', 'within', 'wor', 'xnor', 'xor'), suffix=r'\b'), + 'accept_on', 'alias', 'always', 'always_comb', 'always_ff', + 'always_latch', 'and', 'assert', 'assign', 'assume', 'automatic', + 'before', 'begin', 'bind', 'bins', 'binsof', 'bit', 'break', 'buf', + 'bufif0', 'bufif1', 'byte', 'case', 'casex', 'casez', 'cell', + 'chandle', 'checker', 'class', 'clocking', 'cmos', 'config', + 'const', 'constraint', 'context', 'continue', 'cover', 'covergroup', + 'coverpoint', 'cross', 'deassign', 'default', 'defparam', 'design', + 'disable', 'dist', 'do', 'edge', 'else', 'end', 'endcase', + 'endchecker', 'endclass', 'endclocking', 'endconfig', 'endfunction', + 'endgenerate', 'endgroup', 'endinterface', 'endmodule', 'endpackage', + 'endprimitive', 'endprogram', 'endproperty', 'endsequence', + 'endspecify', 'endtable', 'endtask', 'enum', 'event', 'eventually', + 'expect', 'export', 'extends', 'extern', 'final', 'first_match', + 'for', 'force', 'foreach', 'forever', 'fork', 'forkjoin', 'function', + 'generate', 'genvar', 'global', 'highz0', 'highz1', 'if', 'iff', + 'ifnone', 'ignore_bins', 'illegal_bins', 'implies', 'import', + 'incdir', 'include', 'initial', 'inout', 'input', 'inside', + 'instance', 'int', 'integer', 'interface', 'intersect', 'join', + 'join_any', 'join_none', 'large', 'let', 'liblist', 'library', + 'local', 'localparam', 'logic', 'longint', 'macromodule', 'matches', + 'medium', 'modport', 'module', 'nand', 'negedge', 'new', 'nexttime', + 'nmos', 'nor', 'noshowcancelled', 'not', 'notif0', 'notif1', 'null', + 'or', 'output', 'package', 'packed', 'parameter', 'pmos', 'posedge', + 'primitive', 'priority', 'program', 'property', 'protected', 'pull0', + 'pull1', 'pulldown', 'pullup', 'pulsestyle_ondetect', + 'pulsestyle_onevent', 'pure', 'rand', 'randc', 'randcase', + 'randsequence', 'rcmos', 'real', 'realtime', 'ref', 'reg', 
+ 'reject_on', 'release', 'repeat', 'restrict', 'return', 'rnmos', + 'rpmos', 'rtran', 'rtranif0', 'rtranif1', 's_always', 's_eventually', + 's_nexttime', 's_until', 's_until_with', 'scalared', 'sequence', + 'shortint', 'shortreal', 'showcancelled', 'signed', 'small', 'solve', + 'specify', 'specparam', 'static', 'string', 'strong', 'strong0', + 'strong1', 'struct', 'super', 'supply0', 'supply1', 'sync_accept_on', + 'sync_reject_on', 'table', 'tagged', 'task', 'this', 'throughout', + 'time', 'timeprecision', 'timeunit', 'tran', 'tranif0', 'tranif1', + 'tri', 'tri0', 'tri1', 'triand', 'trior', 'trireg', 'type', + 'typedef', 'union', 'unique', 'unique0', 'unsigned', 'until', + 'until_with', 'untyped', 'use', 'uwire', 'var', 'vectored', + 'virtual', 'void', 'wait', 'wait_order', 'wand', 'weak', 'weak0', + 'weak1', 'while', 'wildcard', 'wire', 'with', 'within', 'wor', + 'xnor', 'xor'), suffix=r'\b'), Keyword), (words(( - '`__FILE__', '`__LINE__', '`begin_keywords', '`celldefine', '`default_nettype', - '`define', '`else', '`elsif', '`end_keywords', '`endcelldefine', '`endif', - '`ifdef', '`ifndef', '`include', '`line', '`nounconnected_drive', '`pragma', - '`resetall', '`timescale', '`unconnected_drive', '`undef', '`undefineall'), + '`__FILE__', '`__LINE__', '`begin_keywords', '`celldefine', + '`default_nettype', '`define', '`else', '`elsif', '`end_keywords', + '`endcelldefine', '`endif', '`ifdef', '`ifndef', '`include', + '`line', '`nounconnected_drive', '`pragma', '`resetall', + '`timescale', '`unconnected_drive', '`undef', '`undefineall'), suffix=r'\b'), Comment.Preproc), (words(( - '$display', '$displayb', '$displayh', '$displayo', '$dumpall', '$dumpfile', - '$dumpflush', '$dumplimit', '$dumpoff', '$dumpon', '$dumpports', - '$dumpportsall', '$dumpportsflush', '$dumpportslimit', '$dumpportsoff', - '$dumpportson', '$dumpvars', '$fclose', '$fdisplay', '$fdisplayb', - '$fdisplayh', '$fdisplayo', '$feof', '$ferror', '$fflush', '$fgetc', - '$fgets', '$finish', '$fmonitor', '$fmonitorb', '$fmonitorh', '$fmonitoro', - '$fopen', '$fread', '$fscanf', '$fseek', '$fstrobe', '$fstrobeb', '$fstrobeh', + '$display', '$displayb', '$displayh', '$displayo', '$dumpall', + '$dumpfile', '$dumpflush', '$dumplimit', '$dumpoff', '$dumpon', + '$dumpports', '$dumpportsall', '$dumpportsflush', '$dumpportslimit', + '$dumpportsoff', '$dumpportson', '$dumpvars', '$fclose', + '$fdisplay', '$fdisplayb', '$fdisplayh', '$fdisplayo', '$feof', + '$ferror', '$fflush', '$fgetc', '$fgets', '$finish', '$fmonitor', + '$fmonitorb', '$fmonitorh', '$fmonitoro', '$fopen', '$fread', + '$fscanf', '$fseek', '$fstrobe', '$fstrobeb', '$fstrobeh', '$fstrobeo', '$ftell', '$fwrite', '$fwriteb', '$fwriteh', '$fwriteo', '$monitor', '$monitorb', '$monitorh', '$monitoro', '$monitoroff', - '$monitoron', '$plusargs', '$random', '$readmemb', '$readmemh', '$rewind', - '$sformat', '$sformatf', '$sscanf', '$strobe', '$strobeb', '$strobeh', '$strobeo', - '$swrite', '$swriteb', '$swriteh', '$swriteo', '$test', '$ungetc', - '$value$plusargs', '$write', '$writeb', '$writeh', '$writememb', - '$writememh', '$writeo'), suffix=r'\b'), + '$monitoron', '$plusargs', '$random', '$readmemb', '$readmemh', + '$rewind', '$sformat', '$sformatf', '$sscanf', '$strobe', + '$strobeb', '$strobeh', '$strobeo', '$swrite', '$swriteb', + '$swriteh', '$swriteo', '$test', '$ungetc', '$value$plusargs', + '$write', '$writeb', '$writeh', '$writememb', '$writememh', + '$writeo'), suffix=r'\b'), Name.Builtin), (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'), @@ -276,15 +279,6 @@ 
class SystemVerilogLexer(RegexLexer): ] } - def get_tokens_unprocessed(self, text): - for index, token, value in \ - RegexLexer.get_tokens_unprocessed(self, text): - # Convention: mark all upper case names as constants - if token is Name: - if value.isupper(): - token = Name.Constant - yield index, token, value - class VhdlLexer(RegexLexer): """ diff --git a/pygments/lexers/html.py b/pygments/lexers/html.py index ae38167..cbef4f7 100644 --- a/pygments/lexers/html.py +++ b/pygments/lexers/html.py @@ -244,7 +244,7 @@ class XsltLexer(XmlLexer): filenames = ['*.xsl', '*.xslt', '*.xpl'] # xpl is XProc mimetypes = ['application/xsl+xml', 'application/xslt+xml'] - EXTRA_KEYWORDS = set(( + EXTRA_KEYWORDS = { 'apply-imports', 'apply-templates', 'attribute', 'attribute-set', 'call-template', 'choose', 'comment', 'copy', 'copy-of', 'decimal-format', 'element', 'fallback', @@ -253,7 +253,7 @@ class XsltLexer(XmlLexer): 'preserve-space', 'processing-instruction', 'sort', 'strip-space', 'stylesheet', 'template', 'text', 'transform', 'value-of', 'variable', 'when', 'with-param' - )) + } def get_tokens_unprocessed(self, text): for index, token, value in XmlLexer.get_tokens_unprocessed(self, text): diff --git a/pygments/lexers/javascript.py b/pygments/lexers/javascript.py index e61c451..e9cf672 100644 --- a/pygments/lexers/javascript.py +++ b/pygments/lexers/javascript.py @@ -372,6 +372,7 @@ class DartLexer(RegexLexer): (r'\b(bool|double|dynamic|int|num|Object|String|void)\b', Keyword.Type), (r'\b(false|null|true)\b', Keyword.Constant), (r'[~!%^&*+=|?:<>/-]|as\b', Operator), + (r'@[a-zA-Z_$]\w*', Name.Decorator), (r'[a-zA-Z_$]\w*:', Name.Label), (r'[a-zA-Z_$]\w*', Name), (r'[(){}\[\],.;]', Punctuation), @@ -1033,7 +1034,6 @@ class CoffeeScriptLexer(RegexLexer): filenames = ['*.coffee'] mimetypes = ['text/coffeescript'] - _operator_re = ( r'\+\+|~|&&|\band\b|\bor\b|\bis\b|\bisnt\b|\bnot\b|\?|:|' r'\|\||\\(?=\n)|' @@ -1457,17 +1457,20 @@ class EarlGreyLexer(RegexLexer): (r'8r[0-7]+', Number.Oct), (r'2r[01]+', Number.Bin), (r'16r[a-fA-F0-9]+', Number.Hex), - (r'([3-79]|[12][0-9]|3[0-6])r[a-zA-Z\d]+(\.[a-zA-Z\d]+)?', Number.Radix), + (r'([3-79]|[12][0-9]|3[0-6])r[a-zA-Z\d]+(\.[a-zA-Z\d]+)?', + Number.Radix), (r'\d+', Number.Integer) ], } + class JuttleLexer(RegexLexer): """ For `Juttle`_ source code. .. _Juttle: https://github.com/juttle/juttle + .. versionadded:: 2.2 """ name = 'Juttle' @@ -1502,19 +1505,24 @@ class JuttleLexer(RegexLexer): r'(\d+(\.\d*)?|\.\d+)(ms|[smhdwMy])?):', String.Moment), (r':\d{4}-\d{2}-\d{2}(T\d{2}:\d{2}:\d{2}(\.\d*)?)?' r'(Z|[+-]\d{2}:\d{2}|[+-]\d{4})?:', String.Moment), - (r':((\d+(\.\d*)?|\.\d+)[ ]+)?(millisecond|second|minute|hour|day|week|month|year)[s]?' - r'(([ ]+and[ ]+(\d+[ ]+)?(millisecond|second|minute|hour|day|week|month|year)[s]?)' + (r':((\d+(\.\d*)?|\.\d+)[ ]+)?(millisecond|second|minute|hour|' + r'day|week|month|year)[s]?' 
+ r'(([ ]+and[ ]+(\d+[ ]+)?(millisecond|second|minute|hour|' + r'day|week|month|year)[s]?)' r'|[ ]+(ago|from[ ]+now))*:', String.Moment), (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|' r'(==?|!=?|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'), (r'[{(\[;,]', Punctuation, 'slashstartsregex'), (r'[})\].]', Punctuation), (r'(import|return|continue|if|else)\b', Keyword, 'slashstartsregex'), - (r'(var|const|function|reducer|sub|input)\b', Keyword.Declaration, 'slashstartsregex'), + (r'(var|const|function|reducer|sub|input)\b', Keyword.Declaration, + 'slashstartsregex'), (r'(batch|emit|filter|head|join|keep|pace|pass|put|read|reduce|remove|' - r'sequence|skip|sort|split|tail|unbatch|uniq|view|write)\b', Keyword.Reserved), + r'sequence|skip|sort|split|tail|unbatch|uniq|view|write)\b', + Keyword.Reserved), (r'(true|false|null|Infinity)\b', Keyword.Constant), - (r'(Array|Date|Juttle|Math|Number|Object|RegExp|String)\b', Name.Builtin), + (r'(Array|Date|Juttle|Math|Number|Object|RegExp|String)\b', + Name.Builtin), (JS_IDENT, Name.Other), (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), (r'[0-9]+', Number.Integer), diff --git a/pygments/lexers/jvm.py b/pygments/lexers/jvm.py index d01b96f..5728e7c 100644 --- a/pygments/lexers/jvm.py +++ b/pygments/lexers/jvm.py @@ -26,7 +26,7 @@ __all__ = ['JavaLexer', 'ScalaLexer', 'GosuLexer', 'GosuTemplateLexer', class JavaLexer(RegexLexer): """ - For `Java `_ source code. + For `Java `_ source code. """ name = 'Java' @@ -50,7 +50,7 @@ class JavaLexer(RegexLexer): (r'((?:(?:[^\W\d]|\$)[\w.\[\]$<>]*\s+)+?)' # return arguments r'((?:[^\W\d]|\$)[\w$]*)' # method name r'(\s*)(\()', # signature start - bygroups(using(this), Name.Function, Text, Operator)), + bygroups(using(this), Name.Function, Text, Punctuation)), (r'@[^\W\d][\w.]*', Name.Decorator), (r'(abstract|const|enum|extends|final|implements|native|private|' r'protected|public|static|strictfp|super|synchronized|throws|' @@ -61,11 +61,14 @@ class JavaLexer(RegexLexer): (r'(true|false|null)\b', Keyword.Constant), (r'(class|interface)(\s+)', bygroups(Keyword.Declaration, Text), 'class'), + (r'(var)(\s+)', bygroups(Keyword.Declaration, Text), + 'var'), (r'(import(?:\s+static)?)(\s+)', bygroups(Keyword.Namespace, Text), 'import'), (r'"(\\\\|\\"|[^"])*"', String), (r"'\\.'|'[^\\]'|'\\u[0-9a-fA-F]{4}'", String.Char), - (r'(\.)((?:[^\W\d]|\$)[\w$]*)', bygroups(Operator, Name.Attribute)), + (r'(\.)((?:[^\W\d]|\$)[\w$]*)', bygroups(Punctuation, + Name.Attribute)), (r'^\s*([^\W\d]|\$)[\w$]*:', Name.Label), (r'([^\W\d]|\$)[\w$]*', Name), (r'([0-9][0-9_]*\.([0-9][0-9_]*)?|' @@ -80,12 +83,16 @@ class JavaLexer(RegexLexer): (r'0[bB][01][01_]*[lL]?', Number.Bin), (r'0[0-7_]+[lL]?', Number.Oct), (r'0|[1-9][0-9_]*[lL]?', Number.Integer), - (r'[~^*!%&\[\](){}<>|+=:;,./?-]', Operator), + (r'[~^*!%&\[\]<>|+=/?-]', Operator), + (r'[{}();:.,]', Punctuation), (r'\n', Text) ], 'class': [ (r'([^\W\d]|\$)[\w$]*', Name.Class, '#pop') ], + 'var': [ + (r'([^\W\d]|\$)[\w$]*', Name, '#pop') + ], 'import': [ (r'[\w.]+\*?', Name.Namespace, '#pop') ], @@ -104,7 +111,7 @@ class AspectJLexer(JavaLexer): filenames = ['*.aj'] mimetypes = ['text/x-aspectj'] - aj_keywords = set(( + aj_keywords = { 'aspect', 'pointcut', 'privileged', 'call', 'execution', 'initialization', 'preinitialization', 'handler', 'get', 'set', 'staticinitialization', 'target', 'args', 'within', 'withincode', @@ -114,9 +121,9 @@ class AspectJLexer(JavaLexer): 'thisJoinPointStaticPart', 'thisEnclosingJoinPointStaticPart', 'issingleton', 'perthis', 'pertarget', 'percflow', 
'percflowbelow', 'pertypewithin', 'lock', 'unlock', 'thisAspectInstance' - )) - aj_inter_type = set(('parents:', 'warning:', 'error:', 'soft:', 'precedence:')) - aj_inter_type_annotation = set(('@type', '@method', '@constructor', '@field')) + } + aj_inter_type = {'parents:', 'warning:', 'error:', 'soft:', 'precedence:'} + aj_inter_type_annotation = {'@type', '@method', '@constructor', '@field'} def get_tokens_unprocessed(self, text): for index, token, value in JavaLexer.get_tokens_unprocessed(self, text): diff --git a/pygments/lexers/lisp.py b/pygments/lexers/lisp.py index 169d7a9..601d5a5 100644 --- a/pygments/lexers/lisp.py +++ b/pygments/lexers/lisp.py @@ -1554,7 +1554,7 @@ class EmacsLispLexer(RegexLexer): # Take a deep breath... symbol = r'((?:%s)(?:%s)*)' % (nonmacro, constituent) - macros = set(( + macros = { 'atomic-change-group', 'case', 'block', 'cl-block', 'cl-callf', 'cl-callf2', 'cl-case', 'cl-decf', 'cl-declaim', 'cl-declare', 'cl-define-compiler-macro', 'cl-defmacro', 'cl-defstruct', @@ -1601,17 +1601,17 @@ class EmacsLispLexer(RegexLexer): 'with-tramp-file-property', 'with-tramp-progress-reporter', 'with-wrapper-hook', 'load-time-value', 'locally', 'macrolet', 'progv', 'return-from', - )) + } - special_forms = set(( + special_forms = { 'and', 'catch', 'cond', 'condition-case', 'defconst', 'defvar', 'function', 'if', 'interactive', 'let', 'let*', 'or', 'prog1', 'prog2', 'progn', 'quote', 'save-current-buffer', 'save-excursion', 'save-restriction', 'setq', 'setq-default', 'subr-arity', 'unwind-protect', 'while', - )) + } - builtin_function = set(( + builtin_function = { '%', '*', '+', '-', '/', '/=', '1+', '1-', '<', '<=', '=', '>', '>=', 'Snarf-documentation', 'abort-recursive-edit', 'abs', 'accept-process-output', 'access-file', 'accessible-keymaps', 'acos', @@ -1937,8 +1937,9 @@ class EmacsLispLexer(RegexLexer): 'split-window-internal', 'sqrt', 'standard-case-table', 'standard-category-table', 'standard-syntax-table', 'start-kbd-macro', 'start-process', 'stop-process', 'store-kbd-macro-event', 'string', - 'string-as-multibyte', 'string-as-unibyte', 'string-bytes', - 'string-collate-equalp', 'string-collate-lessp', 'string-equal', + 'string=', 'string<', 'string>', 'string-as-multibyte', + 'string-as-unibyte', 'string-bytes', 'string-collate-equalp', + 'string-collate-lessp', 'string-equal', 'string-greaterp', 'string-lessp', 'string-make-multibyte', 'string-make-unibyte', 'string-match', 'string-to-char', 'string-to-multibyte', 'string-to-number', 'string-to-syntax', 'string-to-unibyte', @@ -2050,23 +2051,23 @@ class EmacsLispLexer(RegexLexer): 'xw-color-values', 'xw-display-color-p', 'xw-display-color-p', 'yes-or-no-p', 'zlib-available-p', 'zlib-decompress-region', 'forward-point', - )) + } - builtin_function_highlighted = set(( + builtin_function_highlighted = { 'defvaralias', 'provide', 'require', 'with-no-warnings', 'define-widget', 'with-electric-help', 'throw', 'defalias', 'featurep' - )) + } - lambda_list_keywords = set(( + lambda_list_keywords = { '&allow-other-keys', '&aux', '&body', '&environment', '&key', '&optional', '&rest', '&whole', - )) + } - error_keywords = set(( + error_keywords = { 'cl-assert', 'cl-check-type', 'error', 'signal', 'user-error', 'warn', - )) + } def get_tokens_unprocessed(self, text): stack = ['root'] @@ -2225,7 +2226,7 @@ class ShenLexer(RegexLexer): BUILTINS_ANYWHERE = ('where', 'skip', '>>', '_', '!', '', '') - MAPPINGS = dict((s, Keyword) for s in DECLARATIONS) + MAPPINGS = {s: Keyword for s in DECLARATIONS} MAPPINGS.update((s, 
Name.Builtin) for s in BUILTINS) MAPPINGS.update((s, Keyword) for s in SPECIAL_FORMS) diff --git a/pygments/lexers/matlab.py b/pygments/lexers/matlab.py index fd3897a..ee85d08 100644 --- a/pygments/lexers/matlab.py +++ b/pygments/lexers/matlab.py @@ -72,6 +72,8 @@ class MatlabLexer(RegexLexer): "hilb", "invhilb", "magic", "pascal", "rosser", "toeplitz", "vander", "wilkinson") + _operators = r'-|==|~=|<=|>=|<|>|&&|&|~|\|\|?|\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\' + tokens = { 'root': [ # line starting with '!' is sent as a system command. not sure what @@ -79,7 +81,7 @@ class MatlabLexer(RegexLexer): (r'^!.*', String.Other), (r'%\{\s*\n', Comment.Multiline, 'blockcomment'), (r'%.*$', Comment), - (r'^\s*function', Keyword, 'deffunc'), + (r'^\s*function\b', Keyword, 'deffunc'), # from 'iskeyword' on version 7.11 (R2010): (words(( @@ -94,10 +96,22 @@ class MatlabLexer(RegexLexer): # line continuation with following comment: (r'\.\.\..*$', Comment), + # command form: + # "How MATLAB Recognizes Command Syntax" specifies that an operator + # is recognized if it is either surrounded by spaces or by no + # spaces on both sides; only the former case matters for us. (This + # allows distinguishing `cd ./foo` from `cd ./ foo`.) + (r'(?:^|(?<=;))\s*\w+\s+(?!=|\(|(%s)\s+)' % _operators, Name, + 'commandargs'), + # operators: - (r'-|==|~=|<|>|<=|>=|&&|&|~|\|\|?', Operator), - # operators requiring escape for re: - (r'\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\', Operator), + (_operators, Operator), + + # numbers (must come before punctuation to handle `.5`; cannot use + # `\b` due to e.g. `5. + .5`). + (r'(?', '->', '#', # Modules ':>', - )) + } - nonid_reserved = set(('(', ')', '[', ']', '{', '}', ',', ';', '...', '_')) + nonid_reserved = {'(', ')', '[', ']', '{', '}', ',', ';', '...', '_'} alphanumid_re = r"[a-zA-Z][\w']*" symbolicid_re = r"[!%&$#+\-/:<=>?@\\~`^|*]+" diff --git a/pygments/lexers/pascal.py b/pygments/lexers/pascal.py index 251bca2..0a8dd7d 100644 --- a/pygments/lexers/pascal.py +++ b/pygments/lexers/pascal.py @@ -68,29 +68,29 @@ class DelphiLexer(Lexer): 'dispose', 'exit', 'false', 'new', 'true' ) - BLOCK_KEYWORDS = set(( + BLOCK_KEYWORDS = { 'begin', 'class', 'const', 'constructor', 'destructor', 'end', 'finalization', 'function', 'implementation', 'initialization', 'label', 'library', 'operator', 'procedure', 'program', 'property', 'record', 'threadvar', 'type', 'unit', 'uses', 'var' - )) + } - FUNCTION_MODIFIERS = set(( + FUNCTION_MODIFIERS = { 'alias', 'cdecl', 'export', 'inline', 'interrupt', 'nostackframe', 'pascal', 'register', 'safecall', 'softfloat', 'stdcall', 'varargs', 'name', 'dynamic', 'near', 'virtual', 'external', 'override', 'assembler' - )) + } # XXX: those aren't global. but currently we know no way for defining # them just for the type context. 
- DIRECTIVES = set(( + DIRECTIVES = { 'absolute', 'abstract', 'assembler', 'cppdecl', 'default', 'far', 'far16', 'forward', 'index', 'oldfpccall', 'private', 'protected', 'published', 'public' - )) + } - BUILTIN_TYPES = set(( + BUILTIN_TYPES = { 'ansichar', 'ansistring', 'bool', 'boolean', 'byte', 'bytebool', 'cardinal', 'char', 'comp', 'currency', 'double', 'dword', 'extended', 'int64', 'integer', 'iunknown', 'longbool', 'longint', @@ -104,7 +104,7 @@ class DelphiLexer(Lexer): 'shortstring', 'single', 'smallint', 'string', 'tclass', 'tdate', 'tdatetime', 'textfile', 'thandle', 'tobject', 'ttime', 'variant', 'widechar', 'widestring', 'word', 'wordbool' - )) + } BUILTIN_UNITS = { 'System': ( @@ -246,7 +246,7 @@ class DelphiLexer(Lexer): ) } - ASM_REGISTERS = set(( + ASM_REGISTERS = { 'ah', 'al', 'ax', 'bh', 'bl', 'bp', 'bx', 'ch', 'cl', 'cr0', 'cr1', 'cr2', 'cr3', 'cr4', 'cs', 'cx', 'dh', 'di', 'dl', 'dr0', 'dr1', 'dr2', 'dr3', 'dr4', 'dr5', 'dr6', 'dr7', 'ds', 'dx', @@ -255,9 +255,9 @@ class DelphiLexer(Lexer): 'mm7', 'si', 'sp', 'ss', 'st0', 'st1', 'st2', 'st3', 'st4', 'st5', 'st6', 'st7', 'xmm0', 'xmm1', 'xmm2', 'xmm3', 'xmm4', 'xmm5', 'xmm6', 'xmm7' - )) + } - ASM_INSTRUCTIONS = set(( + ASM_INSTRUCTIONS = { 'aaa', 'aad', 'aam', 'aas', 'adc', 'add', 'and', 'arpl', 'bound', 'bsf', 'bsr', 'bswap', 'bt', 'btc', 'btr', 'bts', 'call', 'cbw', 'cdq', 'clc', 'cld', 'cli', 'clts', 'cmc', 'cmova', 'cmovae', @@ -296,7 +296,7 @@ class DelphiLexer(Lexer): 'sysret', 'test', 'ud1', 'ud2', 'umov', 'verr', 'verw', 'wait', 'wbinvd', 'wrmsr', 'wrshr', 'xadd', 'xbts', 'xchg', 'xlat', 'xlatb', 'xor' - )) + } def __init__(self, **options): Lexer.__init__(self, **options) diff --git a/pygments/lexers/pawn.py b/pygments/lexers/pawn.py index 576df42..3cdfbd0 100644 --- a/pygments/lexers/pawn.py +++ b/pygments/lexers/pawn.py @@ -86,25 +86,25 @@ class SourcePawnLexer(RegexLexer): ] } - SM_TYPES = set(('Action', 'bool', 'Float', 'Plugin', 'String', 'any', - 'AdminFlag', 'OverrideType', 'OverrideRule', 'ImmunityType', - 'GroupId', 'AdminId', 'AdmAccessMode', 'AdminCachePart', - 'CookieAccess', 'CookieMenu', 'CookieMenuAction', 'NetFlow', - 'ConVarBounds', 'QueryCookie', 'ReplySource', - 'ConVarQueryResult', 'ConVarQueryFinished', 'Function', - 'Action', 'Identity', 'PluginStatus', 'PluginInfo', 'DBResult', - 'DBBindType', 'DBPriority', 'PropType', 'PropFieldType', - 'MoveType', 'RenderMode', 'RenderFx', 'EventHookMode', - 'EventHook', 'FileType', 'FileTimeMode', 'PathType', - 'ParamType', 'ExecType', 'DialogType', 'Handle', 'KvDataTypes', - 'NominateResult', 'MapChange', 'MenuStyle', 'MenuAction', - 'MenuSource', 'RegexError', 'SDKCallType', 'SDKLibrary', - 'SDKFuncConfSource', 'SDKType', 'SDKPassMethod', 'RayType', - 'TraceEntityFilter', 'ListenOverride', 'SortOrder', 'SortType', - 'SortFunc2D', 'APLRes', 'FeatureType', 'FeatureStatus', - 'SMCResult', 'SMCError', 'TFClassType', 'TFTeam', 'TFCond', - 'TFResourceType', 'Timer', 'TopMenuAction', 'TopMenuObjectType', - 'TopMenuPosition', 'TopMenuObject', 'UserMsg')) + SM_TYPES = {'Action', 'bool', 'Float', 'Plugin', 'String', 'any', + 'AdminFlag', 'OverrideType', 'OverrideRule', 'ImmunityType', + 'GroupId', 'AdminId', 'AdmAccessMode', 'AdminCachePart', + 'CookieAccess', 'CookieMenu', 'CookieMenuAction', 'NetFlow', + 'ConVarBounds', 'QueryCookie', 'ReplySource', + 'ConVarQueryResult', 'ConVarQueryFinished', 'Function', + 'Action', 'Identity', 'PluginStatus', 'PluginInfo', 'DBResult', + 'DBBindType', 'DBPriority', 'PropType', 'PropFieldType', + 'MoveType', 'RenderMode', 
'RenderFx', 'EventHookMode', + 'EventHook', 'FileType', 'FileTimeMode', 'PathType', + 'ParamType', 'ExecType', 'DialogType', 'Handle', 'KvDataTypes', + 'NominateResult', 'MapChange', 'MenuStyle', 'MenuAction', + 'MenuSource', 'RegexError', 'SDKCallType', 'SDKLibrary', + 'SDKFuncConfSource', 'SDKType', 'SDKPassMethod', 'RayType', + 'TraceEntityFilter', 'ListenOverride', 'SortOrder', 'SortType', + 'SortFunc2D', 'APLRes', 'FeatureType', 'FeatureStatus', + 'SMCResult', 'SMCError', 'TFClassType', 'TFTeam', 'TFCond', + 'TFResourceType', 'Timer', 'TopMenuAction', 'TopMenuObjectType', + 'TopMenuPosition', 'TopMenuObject', 'UserMsg'} def __init__(self, **options): self.smhighlighting = get_bool_opt(options, diff --git a/pygments/lexers/praat.py b/pygments/lexers/praat.py index fa91880..4a6a14f 100644 --- a/pygments/lexers/praat.py +++ b/pygments/lexers/praat.py @@ -55,7 +55,7 @@ class PraatLexer(RegexLexer): 'exitScript', 'exp', 'extractNumber', 'fileReadable', 'fisherP', 'fisherQ', 'floor', 'gaussP', 'gaussQ', 'hertzToBark', 'hertzToErb', 'hertzToMel', 'hertzToSemitones', 'imax', 'imin', 'incompleteBeta', 'incompleteGammaP', 'index', - 'index_regex', 'invBinomialP', 'invBinomialQ', 'invChiSquareQ', 'invFisherQ', + 'index_regex', 'integer', 'invBinomialP', 'invBinomialQ', 'invChiSquareQ', 'invFisherQ', 'invGaussQ', 'invSigmoid', 'invStudentQ', 'length', 'ln', 'lnBeta', 'lnGamma', 'log10', 'log2', 'max', 'melToHertz', 'min', 'minusObject', 'natural', 'number', 'numberOfColumns', 'numberOfRows', 'numberOfSelected', 'objectsAreIdentical', @@ -63,9 +63,9 @@ class PraatLexer(RegexLexer): 'positive', 'randomBinomial', 'randomGauss', 'randomInteger', 'randomPoisson', 'randomUniform', 'real', 'readFile', 'removeObject', 'rindex', 'rindex_regex', 'round', 'runScript', 'runSystem', 'runSystem_nocheck', 'selectObject', - 'selected', 'semitonesToHertz', 'sentencetext', 'sigmoid', 'sin', 'sinc', + 'selected', 'semitonesToHertz', 'sentence', 'sentencetext', 'sigmoid', 'sin', 'sinc', 'sincpi', 'sinh', 'soundPressureToPhon', 'sqrt', 'startsWith', 'studentP', - 'studentQ', 'tan', 'tanh', 'variableExists', 'word', 'writeFile', 'writeFileLine', + 'studentQ', 'tan', 'tanh', 'text', 'variableExists', 'word', 'writeFile', 'writeFileLine', 'writeInfo', 'writeInfoLine', ) @@ -90,9 +90,9 @@ class PraatLexer(RegexLexer): 'KNN', 'KlattGrid', 'KlattTable', 'LFCC', 'LPC', 'Label', 'LegendreSeries', 'LinearRegression', 'LogisticRegression', 'LongSound', 'Ltas', 'MFCC', 'MSpline', 'ManPages', 'Manipulation', 'Matrix', 'MelFilter', 'MelSpectrogram', - 'MixingMatrix', 'Movie', 'Network', 'OTGrammar', 'OTHistory', 'OTMulti', 'PCA', - 'PairDistribution', 'ParamCurve', 'Pattern', 'Permutation', 'Photo', 'Pitch', - 'PitchModeler', 'PitchTier', 'PointProcess', 'Polygon', 'Polynomial', + 'MixingMatrix', 'Movie', 'Network', 'Object', 'OTGrammar', 'OTHistory', 'OTMulti', + 'PCA', 'PairDistribution', 'ParamCurve', 'Pattern', 'Permutation', 'Photo', + 'Pitch', 'PitchModeler', 'PitchTier', 'PointProcess', 'Polygon', 'Polynomial', 'PowerCepstrogram', 'PowerCepstrum', 'Procrustes', 'RealPoint', 'RealTier', 'ResultsMFC', 'Roots', 'SPINET', 'SSCP', 'SVD', 'Salience', 'ScalarProduct', 'Similarity', 'SimpleString', 'SortedSetOfString', 'Sound', 'Speaker', @@ -112,6 +112,10 @@ class PraatLexer(RegexLexer): 'defaultDirectory', ) + object_attributes = ( + 'ncol', 'nrow', 'xmin', 'ymin', 'xmax', 'ymax', 'nx', 'ny', 'dx', 'dy', + ) + tokens = { 'root': [ (r'(\s+)(#.*?$)', bygroups(Text, Comment.Single)), @@ -148,7 +152,9 @@ class 
PraatLexer(RegexLexer): ], 'command': [ (r'( ?[\w()-]+ ?)', Keyword), - (r"'(?=.*')", String.Interpol, 'string_interpolated'), + + include('string_interpolated'), + (r'\.{3}', Keyword, ('#pop', 'old_arguments')), (r':', Keyword, ('#pop', 'comma_list')), (r'\s', Text, '#pop'), @@ -207,50 +213,49 @@ class PraatLexer(RegexLexer): (r'\n', Text, '#pop'), (r'\b\d+(\.\d*)?([eE][-+]?\d+)?%?', Number), ], - 'object_attributes': [ - (r'\.?(n(col|row)|[xy]min|[xy]max|[nd][xy])\b', Name.Builtin, '#pop'), - (r'(\.?(?:col|row)\$)(\[)', - bygroups(Name.Builtin, Text), 'variable_name'), - (r'(\$?)(\[)', - bygroups(Name.Builtin, Text), ('#pop', 'comma_list')), + 'object_reference': [ + include('string_interpolated'), + (r'([a-z][a-zA-Z0-9_]*|\d+)', Name.Builtin), + + (words(object_attributes, prefix=r'\.'), Name.Builtin, '#pop'), + + (r'\$', Name.Builtin), + (r'\[', Text, '#pop'), ], 'variable_name': [ include('operator'), include('number'), (words(variables_string, suffix=r'\$'), Name.Variable.Global), - (words(variables_numeric, suffix=r'\b'), Name.Variable.Global), - - (r'\bObject_\w+', Name.Builtin, 'object_attributes'), - (words(objects, prefix=r'\b', suffix=r'_\w+'), - Name.Builtin, 'object_attributes'), + (words(variables_numeric, + suffix=r'(?=[^a-zA-Z0-9\._"\'\$#\[:\(]|\s|^|$)'), + Name.Variable.Global), - (r"\b(Object_)(')", - bygroups(Name.Builtin, String.Interpol), - ('object_attributes', 'string_interpolated')), - (words(objects, prefix=r'\b', suffix=r"(_)(')"), - bygroups(Name.Builtin, Name.Builtin, String.Interpol), - ('object_attributes', 'string_interpolated')), + (words(objects, prefix=r'\b', suffix=r"(_)"), + bygroups(Name.Builtin, Name.Builtin), + 'object_reference'), (r'\.?_?[a-z][\w.]*(\$|#)?', Text), (r'[\[\]]', Punctuation, 'comma_list'), - (r"'(?=.*')", String.Interpol, 'string_interpolated'), + + include('string_interpolated'), ], 'operator': [ (r'([+\/*<>=!-]=?|[&*|][&*|]?|\^|<>)', Operator), (r'(?`_ source code. + For `Python `_ source code (version 3.x). + + .. versionadded:: 0.10 + + .. versionchanged:: 2.5 + This is now the default ``PythonLexer``. It is still available as the + alias ``Python3Lexer``. """ name = 'Python' - aliases = ['python', 'py', 'sage'] - filenames = ['*.py', '*.pyw', '*.sc', 'SConstruct', 'SConscript', '*.tac', '*.sage'] - mimetypes = ['text/x-python', 'application/x-python'] + aliases = ['python', 'py', 'sage', 'python3', 'py3'] + filenames = [ + '*.py', + '*.pyw', + # Jython + '*.jy', + # Sage + '*.sage', + # SCons + '*.sc', + 'SConstruct', + 'SConscript', + # Skylark/Starlark (used by Bazel, Buck, and Pants) + '*.bzl', + 'BUCK', + 'BUILD', + 'BUILD.bazel', + 'WORKSPACE', + # Twisted Application infrastructure + '*.tac', + ] + mimetypes = ['text/x-python', 'application/x-python', + 'text/x-python3', 'application/x-python3'] + + flags = re.MULTILINE | re.UNICODE + + uni_name = "[%s][%s]*" % (uni.xid_start, uni.xid_continue) + + def innerstring_rules(ttype): + return [ + # the old style '%s' % (...) string formatting (still valid in Py3) + (r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?' + '[hlL]?[E-GXc-giorsaux%]', String.Interpol), + # the new style '{}'.format(...) string formatting + (r'\{' + r'((\w+)((\.\w+)|(\[[^\]]+\]))*)?' # field name + r'(\![sra])?' # conversion + r'(\:(.?[<>=\^])?[-+ ]?#?0?(\d+)?,?(\.\d+)?[E-GXb-gnosx%]?)?' 
+ r'\}', String.Interpol), + + # backslashes, quotes and formatting signs must be parsed one at a time + (r'[^\\\'"%{\n]+', ttype), + (r'[\'"\\]', ttype), + # unhandled string formatting sign + (r'%|(\{{1,2})', ttype) + # newlines are an error (use "nl" state) + ] + + tokens = { + 'root': [ + (r'\n', Text), + (r'^(\s*)([rRuUbB]{,2})("""(?:.|\n)*?""")', + bygroups(Text, String.Affix, String.Doc)), + (r"^(\s*)([rRuUbB]{,2})('''(?:.|\n)*?''')", + bygroups(Text, String.Affix, String.Doc)), + (r'[^\S\n]+', Text), + (r'\A#!.+$', Comment.Hashbang), + (r'#.*$', Comment.Single), + (r'[]{}:(),;[]', Punctuation), + (r'\\\n', Text), + (r'\\', Text), + (r'(in|is|and|or|not)\b', Operator.Word), + (r'!=|==|<<|>>|[-~+/*%=<>&^|.]', Operator), + include('keywords'), + (r'(def)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'funcname'), + (r'(class)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'classname'), + (r'(from)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text), + 'fromimport'), + (r'(import)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text), + 'import'), + include('builtins'), + include('magicfuncs'), + include('magicvars'), + # raw strings + ('(?i)(rb|br|fr|rf|r)(""")', + bygroups(String.Affix, String.Double), 'tdqs'), + ("(?i)(rb|br|fr|rf|r)(''')", + bygroups(String.Affix, String.Single), 'tsqs'), + ('(?i)(rb|br|fr|rf|r)(")', + bygroups(String.Affix, String.Double), 'dqs'), + ("(?i)(rb|br|fr|rf|r)(')", + bygroups(String.Affix, String.Single), 'sqs'), + # non-raw strings + ('([uUbBfF]?)(""")', bygroups(String.Affix, String.Double), + combined('stringescape', 'tdqs')), + ("([uUbBfF]?)(''')", bygroups(String.Affix, String.Single), + combined('stringescape', 'tsqs')), + ('([uUbBfF]?)(")', bygroups(String.Affix, String.Double), + combined('stringescape', 'dqs')), + ("([uUbBfF]?)(')", bygroups(String.Affix, String.Single), + combined('stringescape', 'sqs')), + include('name'), + include('numbers'), + ], + 'keywords': [ + (words(( + 'assert', 'async', 'await', 'break', 'continue', 'del', 'elif', + 'else', 'except', 'finally', 'for', 'global', 'if', 'lambda', + 'pass', 'raise', 'nonlocal', 'return', 'try', 'while', 'yield', + 'yield from', 'as', 'with'), suffix=r'\b'), + Keyword), + (words(('True', 'False', 'None'), suffix=r'\b'), Keyword.Constant), + ], + 'builtins': [ + (words(( + '__import__', 'abs', 'all', 'any', 'bin', 'bool', 'bytearray', + 'bytes', 'chr', 'classmethod', 'cmp', 'compile', 'complex', + 'delattr', 'dict', 'dir', 'divmod', 'enumerate', 'eval', 'filter', + 'float', 'format', 'frozenset', 'getattr', 'globals', 'hasattr', + 'hash', 'hex', 'id', 'input', 'int', 'isinstance', 'issubclass', + 'iter', 'len', 'list', 'locals', 'map', 'max', 'memoryview', + 'min', 'next', 'object', 'oct', 'open', 'ord', 'pow', 'print', + 'property', 'range', 'repr', 'reversed', 'round', 'set', 'setattr', + 'slice', 'sorted', 'staticmethod', 'str', 'sum', 'super', 'tuple', + 'type', 'vars', 'zip'), prefix=r'(?`_ source code. + + .. versionchanged:: 2.5 + This class has been renamed from ``PythonLexer``. ``PythonLexer`` now + refers to the Python 3 variant. File name patterns like ``*.py`` have + been moved to Python 3 as well. 
+ """ + + name = 'Python 2.x' + aliases = ['python2', 'py2'] + filenames = [] # now taken over by PythonLexer (3.x) + mimetypes = ['text/x-python2', 'application/x-python2'] def innerstring_rules(ttype): return [ @@ -124,15 +391,15 @@ class PythonLexer(RegexLexer): 'Exception', 'FloatingPointError', 'FutureWarning', 'GeneratorExit', 'IOError', 'ImportError', 'ImportWarning', 'IndentationError', 'IndexError', 'KeyError', 'KeyboardInterrupt', 'LookupError', - 'MemoryError', 'ModuleNotFoundError', 'NameError', 'NotImplemented', 'NotImplementedError', - 'OSError', 'OverflowError', 'OverflowWarning', 'PendingDeprecationWarning', - 'RecursionError', 'ReferenceError', 'RuntimeError', 'RuntimeWarning', 'StandardError', - 'StopIteration', 'StopAsyncIteration', 'SyntaxError', 'SyntaxWarning', 'SystemError', - 'SystemExit', 'TabError', 'TypeError', 'UnboundLocalError', - 'UnicodeDecodeError', 'UnicodeEncodeError', 'UnicodeError', - 'UnicodeTranslateError', 'UnicodeWarning', 'UserWarning', - 'ValueError', 'VMSError', 'Warning', 'WindowsError', - 'ZeroDivisionError'), prefix=r'(?`_ source code (version 3.0). - - .. versionadded:: 0.10 - """ - - name = 'Python 3' - aliases = ['python3', 'py3'] - filenames = [] # Nothing until Python 3 gets widespread - mimetypes = ['text/x-python3', 'application/x-python3'] - - flags = re.MULTILINE | re.UNICODE - - uni_name = "[%s][%s]*" % (uni.xid_start, uni.xid_continue) - - def innerstring_rules(ttype): - return [ - # the old style '%s' % (...) string formatting (still valid in Py3) - (r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?' - '[hlL]?[E-GXc-giorsaux%]', String.Interpol), - # the new style '{}'.format(...) string formatting - (r'\{' - r'((\w+)((\.\w+)|(\[[^\]]+\]))*)?' # field name - r'(\![sra])?' # conversion - r'(\:(.?[<>=\^])?[-+ ]?#?0?(\d+)?,?(\.\d+)?[E-GXb-gnosx%]?)?' - r'\}', String.Interpol), - - # backslashes, quotes and formatting signs must be parsed one at a time - (r'[^\\\'"%{\n]+', ttype), - (r'[\'"\\]', ttype), - # unhandled string formatting sign - (r'%|(\{{1,2})', ttype) - # newlines are an error (use "nl" state) - ] - - tokens = PythonLexer.tokens.copy() - tokens['keywords'] = [ - (words(( - 'assert', 'async', 'await', 'break', 'continue', 'del', 'elif', - 'else', 'except', 'finally', 'for', 'global', 'if', 'lambda', 'pass', - 'raise', 'nonlocal', 'return', 'try', 'while', 'yield', 'yield from', - 'as', 'with'), suffix=r'\b'), - Keyword), - (words(( - 'True', 'False', 'None'), suffix=r'\b'), - Keyword.Constant), - ] - tokens['builtins'] = [ - (words(( - '__import__', 'abs', 'all', 'any', 'bin', 'bool', 'bytearray', 'bytes', - 'chr', 'classmethod', 'cmp', 'compile', 'complex', 'delattr', 'dict', - 'dir', 'divmod', 'enumerate', 'eval', 'filter', 'float', 'format', - 'frozenset', 'getattr', 'globals', 'hasattr', 'hash', 'hex', 'id', - 'input', 'int', 'isinstance', 'issubclass', 'iter', 'len', 'list', - 'locals', 'map', 'max', 'memoryview', 'min', 'next', 'object', 'oct', - 'open', 'ord', 'pow', 'print', 'property', 'range', 'repr', 'reversed', - 'round', 'set', 'setattr', 'slice', 'sorted', 'staticmethod', 'str', - 'sum', 'super', 'tuple', 'type', 'vars', 'zip'), prefix=r'(?`_ shape expressions language syntax. 
+ """ + name = 'ShExC' + aliases = ['shexc', 'shex'] + filenames = ['*.shex'] + mimetypes = ['text/shex'] + + # character group definitions :: + + PN_CHARS_BASE_GRP = (u'a-zA-Z' + u'\u00c0-\u00d6' + u'\u00d8-\u00f6' + u'\u00f8-\u02ff' + u'\u0370-\u037d' + u'\u037f-\u1fff' + u'\u200c-\u200d' + u'\u2070-\u218f' + u'\u2c00-\u2fef' + u'\u3001-\ud7ff' + u'\uf900-\ufdcf' + u'\ufdf0-\ufffd') + + PN_CHARS_U_GRP = (PN_CHARS_BASE_GRP + '_') + + PN_CHARS_GRP = (PN_CHARS_U_GRP + + r'\-' + + r'0-9' + + u'\u00b7' + + u'\u0300-\u036f' + + u'\u203f-\u2040') + + HEX_GRP = '0-9A-Fa-f' + + PN_LOCAL_ESC_CHARS_GRP = r"_~.\-!$&'()*+,;=/?#@%" + + # terminal productions :: + + PN_CHARS_BASE = '[' + PN_CHARS_BASE_GRP + ']' + + PN_CHARS_U = '[' + PN_CHARS_U_GRP + ']' + + PN_CHARS = '[' + PN_CHARS_GRP + ']' + + HEX = '[' + HEX_GRP + ']' + + PN_LOCAL_ESC_CHARS = '[' + PN_LOCAL_ESC_CHARS_GRP + ']' + + UCHAR_NO_BACKSLASH = '(?:u' + HEX + '{4}|U' + HEX + '{8})' + + UCHAR = r'\\' + UCHAR_NO_BACKSLASH + + IRIREF = r'<(?:[^\x00-\x20<>"{}|^`\\]|' + UCHAR + ')*>' + + BLANK_NODE_LABEL = '_:[0-9' + PN_CHARS_U_GRP + '](?:[' + PN_CHARS_GRP + \ + '.]*' + PN_CHARS + ')?' + + PN_PREFIX = PN_CHARS_BASE + '(?:[' + PN_CHARS_GRP + '.]*' + PN_CHARS + ')?' + + PERCENT = '%' + HEX + HEX + + PN_LOCAL_ESC = r'\\' + PN_LOCAL_ESC_CHARS + + PLX = '(?:' + PERCENT + ')|(?:' + PN_LOCAL_ESC + ')' + + PN_LOCAL = ('(?:[' + PN_CHARS_U_GRP + ':0-9' + ']|' + PLX + ')' + + '(?:(?:[' + PN_CHARS_GRP + '.:]|' + PLX + ')*(?:[' + + PN_CHARS_GRP + ':]|' + PLX + '))?') + + EXPONENT = r'[eE][+-]?\d+' + + # Lexer token definitions :: + + tokens = { + 'root': [ + (r'\s+', Text), + # keywords :: + (r'(?i)(base|prefix|start|external|' + r'literal|iri|bnode|nonliteral|length|minlength|maxlength|' + r'mininclusive|minexclusive|maxinclusive|maxexclusive|' + r'totaldigits|fractiondigits|' + r'closed|extra)\b', Keyword), + (r'(a)\b', Keyword), + # IRIs :: + ('(' + IRIREF + ')', Name.Label), + # blank nodes :: + ('(' + BLANK_NODE_LABEL + ')', Name.Label), + # prefixed names :: + (r'(' + PN_PREFIX + r')?(\:)(' + PN_LOCAL + ')?', + bygroups(Name.Namespace, Punctuation, Name.Tag)), + # boolean literals :: + (r'(true|false)', Keyword.Constant), + # double literals :: + (r'[+\-]?(\d+\.\d*' + EXPONENT + r'|\.?\d+' + EXPONENT + ')', Number.Float), + # decimal literals :: + (r'[+\-]?(\d+\.\d*|\.\d+)', Number.Float), + # integer literals :: + (r'[+\-]?\d+', Number.Integer), + # operators :: + (r'[@|$&=*+?^\-~]', Operator), + # operator keywords :: + (r'(?i)(and|or|not)\b', Operator.Word), + # punctuation characters :: + (r'[(){}.;,:^\[\]]', Punctuation), + # line comments :: + (r'#[^\n]*', Comment), + # strings :: + (r'"""', String, 'triple-double-quoted-string'), + (r'"', String, 'single-double-quoted-string'), + (r"'''", String, 'triple-single-quoted-string'), + (r"'", String, 'single-single-quoted-string'), + ], + 'triple-double-quoted-string': [ + (r'"""', String, 'end-of-string'), + (r'[^\\]+', String), + (r'\\', String, 'string-escape'), + ], + 'single-double-quoted-string': [ + (r'"', String, 'end-of-string'), + (r'[^"\\\n]+', String), + (r'\\', String, 'string-escape'), + ], + 'triple-single-quoted-string': [ + (r"'''", String, 'end-of-string'), + (r'[^\\]+', String), + (r'\\', String.Escape, 'string-escape'), + ], + 'single-single-quoted-string': [ + (r"'", String, 'end-of-string'), + (r"[^'\\\n]+", String), + (r'\\', String, 'string-escape'), + ], + 'string-escape': [ + (UCHAR_NO_BACKSLASH, String.Escape, '#pop'), + (r'.', String.Escape, '#pop'), + ], + 'end-of-string': 
[ + (r'(@)([a-zA-Z]+(?:-[a-zA-Z0-9]+)*)', + bygroups(Operator, Name.Function), '#pop:2'), + (r'\^\^', Operator, '#pop:2'), + default('#pop:2'), + ], + } diff --git a/pygments/lexers/resource.py b/pygments/lexers/resource.py index 6cc88b9..ccd4e5f 100644 --- a/pygments/lexers/resource.py +++ b/pygments/lexers/resource.py @@ -26,7 +26,7 @@ class ResourceLexer(RegexLexer): """ name = 'ResourceBundle' aliases = ['resource', 'resourcebundle'] - filenames = ['*.txt'] + filenames = [] _types = (':table', ':array', ':string', ':bin', ':import', ':intvector', ':int', ':alias') diff --git a/pygments/lexers/robotframework.py b/pygments/lexers/robotframework.py index 1288da8..642c90c 100644 --- a/pygments/lexers/robotframework.py +++ b/pygments/lexers/robotframework.py @@ -64,7 +64,7 @@ class RobotFrameworkLexer(Lexer): """ name = 'RobotFramework' aliases = ['robotframework'] - filenames = ['*.txt', '*.robot'] + filenames = ['*.robot'] mimetypes = ['text/x-robotframework'] def __init__(self, **options): diff --git a/pygments/lexers/ruby.py b/pygments/lexers/ruby.py index 723895d..8bcbde6 100644 --- a/pygments/lexers/ruby.py +++ b/pygments/lexers/ruby.py @@ -43,17 +43,17 @@ class RubyLexer(ExtendedRegexLexer): def heredoc_callback(self, match, ctx): # okay, this is the hardest part of parsing Ruby... - # match: 1 = <<-?, 2 = quote? 3 = name 4 = quote? 5 = rest of line + # match: 1 = <<[-~]?, 2 = quote? 3 = name 4 = quote? 5 = rest of line start = match.start(1) - yield start, Operator, match.group(1) # <<-? + yield start, Operator, match.group(1) # <<[-~]? yield match.start(2), String.Heredoc, match.group(2) # quote ", ', ` yield match.start(3), String.Delimiter, match.group(3) # heredoc name yield match.start(4), String.Heredoc, match.group(4) # quote again heredocstack = ctx.__dict__.setdefault('heredocstack', []) outermost = not bool(heredocstack) - heredocstack.append((match.group(1) == '<<-', match.group(3))) + heredocstack.append((match.group(1) in ('<<-', '<<~'), match.group(3))) ctx.pos = match.start(5) ctx.end = match.end(5) @@ -247,10 +247,10 @@ class RubyLexer(ExtendedRegexLexer): Name.Builtin), (r'__(FILE|LINE)__\b', Name.Builtin.Pseudo), # normal heredocs - (r'(?~!:])|' diff --git a/pygments/lexers/rust.py b/pygments/lexers/rust.py index 2b9e312..f731785 100644 --- a/pygments/lexers/rust.py +++ b/pygments/lexers/rust.py @@ -59,7 +59,7 @@ class RustLexer(RegexLexer): tokens = { 'root': [ # rust allows a file to start with a shebang, but if the first line - # starts with #![ then it’s not a shebang but a crate attribute. + # starts with #![ then it's not a shebang but a crate attribute. 
            (r'#![^[\r\n].*$', Comment.Preproc),
            default('base'),
        ],
@@ -78,10 +78,10 @@ class RustLexer(RegexLexer):
            (r"""\$([a-zA-Z_]\w*|\(,?|\),?|,?)""", Comment.Preproc),
            # Keywords
            (words((
-                'as', 'box', 'const', 'crate', 'else', 'extern',
-                'for', 'if', 'impl', 'in', 'loop', 'match', 'move',
-                'mut', 'pub', 'ref', 'return', 'static', 'super',
-                'trait', 'unsafe', 'use', 'where', 'while'), suffix=r'\b'),
+                'as', 'async', 'await', 'box', 'const', 'crate', 'else',
+                'extern', 'for', 'if', 'impl', 'in', 'loop', 'match', 'move',
+                'mut', 'pub', 'ref', 'return', 'static', 'super', 'trait',
+                'try', 'unsafe', 'use', 'where', 'while'), suffix=r'\b'),
             Keyword),
            (words(('abstract', 'alignof', 'become', 'do', 'final', 'macro',
                    'offsetof', 'override', 'priv', 'proc', 'pure', 'sizeof',
@@ -95,7 +95,7 @@ class RustLexer(RegexLexer):
            (r'(default)(\s+)(type|fn)\b', bygroups(Keyword, Text, Keyword)),
            keyword_types,
            (r'self\b', Name.Builtin.Pseudo),
-            # Prelude (taken from Rust’s src/libstd/prelude.rs)
+            # Prelude (taken from Rust's src/libstd/prelude.rs)
            builtin_types,
            # Path separators, so types don't catch them.
            (r'::\b', Text),
diff --git a/pygments/lexers/scdoc.py b/pygments/lexers/scdoc.py
new file mode 100644
index 0000000..4916393
--- /dev/null
+++ b/pygments/lexers/scdoc.py
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+"""
+    pygments.lexers.scdoc
+    ~~~~~~~~~~~~~~~~~~~~~
+
+    Lexer for scdoc, a simple man page generator.
+
+    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, bygroups, \
+    using, this
+from pygments.token import Text, Comment, Keyword, String, \
+    Generic
+
+
+__all__ = ['ScdocLexer']
+
+
+class ScdocLexer(RegexLexer):
+    """
+    `scdoc` is a simple man page generator for POSIX systems written in C99.
+    https://git.sr.ht/~sircmpwn/scdoc
+
+    .. versionadded:: 2.5
+    """
+    name = 'scdoc'
+    aliases = ['scdoc', 'scd']
+    filenames = ['*.scd', '*.scdoc']
+    flags = re.MULTILINE
+
+    tokens = {
+        'root': [
+            # comment
+            (r'^(;.+\n)', bygroups(Comment)),
+
+            # heading with pound prefix
+            (r'^(#)([^#].+\n)', bygroups(Generic.Heading, Text)),
+            (r'^(#{2})(.+\n)', bygroups(Generic.Subheading, Text)),
+            # bulleted lists
+            (r'^(\s*)([*-])(\s)(.+\n)',
+             bygroups(Text, Keyword, Text, using(this, state='inline'))),
+            # numbered lists
+            (r'^(\s*)(\.+\.)( .+\n)',
+             bygroups(Text, Keyword, using(this, state='inline'))),
+            # quote
+            (r'^(\s*>\s)(.+\n)', bygroups(Keyword, Generic.Emph)),
+            # text block
+            (r'^(```\n)([\w\W]*?)(^```$)', bygroups(String, Text, String)),
+
+            include('inline'),
+        ],
+        'inline': [
+            # escape
+            (r'\\.', Text),
+            # underlines
+            (r'(\s)(_[^_]+_)(\W|\n)', bygroups(Text, Generic.Emph, Text)),
+            # bold
+            (r'(\s)(\*[^\*]+\*)(\W|\n)', bygroups(Text, Generic.Strong, Text)),
+            # inline code
+            (r'`[^`]+`', String.Backtick),
+
+            # general text, must come last!
+            (r'[^\\\s]+', Text),
+            (r'.', Text),
+        ],
+    }
diff --git a/pygments/lexers/shell.py b/pygments/lexers/shell.py
index 972c400..c12cb3f 100644
--- a/pygments/lexers/shell.py
+++ b/pygments/lexers/shell.py
@@ -154,6 +154,9 @@ class ShellSessionBaseLexer(Lexer):
    ..
versionadded:: 2.1 """ + + _venv = re.compile(r'^(\([^)]*\))(\s*)') + def get_tokens_unprocessed(self, text): innerlexer = self._innerLexerCls(**self.options) @@ -164,11 +167,24 @@ class ShellSessionBaseLexer(Lexer): for match in line_re.finditer(text): line = match.group() - m = re.match(self._ps1rgx, line) if backslash_continuation: curcode += line backslash_continuation = curcode.endswith('\\\n') - elif m: + continue + + venv_match = self._venv.match(line) + if venv_match: + venv = venv_match.group(1) + venv_whitespace = venv_match.group(2) + insertions.append((len(curcode), + [(0, Generic.Prompt.VirtualEnv, venv)])) + if venv_whitespace: + insertions.append((len(curcode), + [(0, Text, venv_whitespace)])) + line = line[venv_match.end():] + + m = self._ps1rgx.match(line) + if m: # To support output lexers (say diff output), the output # needs to be broken by prompts whenever the output lexer # changes. @@ -211,9 +227,9 @@ class BashSessionLexer(ShellSessionBaseLexer): mimetypes = ['application/x-shell-session', 'application/x-sh-session'] _innerLexerCls = BashLexer - _ps1rgx = \ + _ps1rgx = re.compile( r'^((?:(?:\[.*?\])|(?:\(\S+\))?(?:| |sh\S*?|\w+\S+[@:]\S+(?:\s+\S+)' \ - r'?|\[\S+[@:][^\n]+\].+))\s*[$#%])(.*\n?)' + r'?|\[\S+[@:][^\n]+\].+))\s*[$#%])(.*\n?)') _ps2 = '>' @@ -540,7 +556,7 @@ class MSDOSSessionLexer(ShellSessionBaseLexer): mimetypes = [] _innerLexerCls = BatchLexer - _ps1rgx = r'^([^>]*>)(.*\n?)' + _ps1rgx = re.compile(r'^([^>]*>)(.*\n?)') _ps2 = 'More? ' @@ -625,7 +641,7 @@ class TcshSessionLexer(ShellSessionBaseLexer): mimetypes = [] _innerLexerCls = TcshLexer - _ps1rgx = r'^([^>]+>)(.*\n?)' + _ps1rgx = re.compile(r'^([^>]+>)(.*\n?)') _ps2 = '? ' @@ -756,7 +772,7 @@ class PowerShellSessionLexer(ShellSessionBaseLexer): mimetypes = [] _innerLexerCls = PowerShellLexer - _ps1rgx = r'^(PS [^>]+> )(.*\n?)' + _ps1rgx = re.compile(r'^(PS [^>]+> )(.*\n?)') _ps2 = '>> ' diff --git a/pygments/lexers/slash.py b/pygments/lexers/slash.py index b631410..76e5929 100644 --- a/pygments/lexers/slash.py +++ b/pygments/lexers/slash.py @@ -26,7 +26,7 @@ class SlashLanguageLexer(ExtendedRegexLexer): def right_angle_bracket(lexer, match, ctx): if len(ctx.stack) > 1 and ctx.stack[-2] == "string": ctx.stack.pop() - yield match.start(), String.Interpol, "}" + yield match.start(), String.Interpol, u"}" ctx.pos = match.end() pass diff --git a/pygments/lexers/solidity.py b/pygments/lexers/solidity.py new file mode 100644 index 0000000..9966837 --- /dev/null +++ b/pygments/lexers/solidity.py @@ -0,0 +1,93 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.solidity + ~~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for Solidity. + + :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, bygroups, include, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +__all__ = ['SolidityLexer'] + + +class SolidityLexer(RegexLexer): + """ + For Solidity source code. + + .. 
versionadded:: 2.5 + """ + + name = 'Solidity' + aliases = ['solidity'] + filenames = ['*.sol'] + mimetypes = [] + + flags = re.MULTILINE | re.UNICODE + + datatype = ( + r'\b(address|bool|((bytes|hash|int|string|uint)(8|16|24|32|40|48|56|64' + r'|72|80|88|96|104|112|120|128|136|144|152|160|168|176|184|192|200|208' + r'|216|224|232|240|248|256)?))\b' + ) + + tokens = { + 'root': [ + include('whitespace'), + include('comments'), + (r'\bpragma\s+solidity\b', Keyword, 'pragma'), + (r'\b(contract)(\s+)([a-zA-Z_]\w*)', + bygroups(Keyword, Text.WhiteSpace, Name.Entity)), + (datatype + r'(\s+)((external|public|internal|private)\s+)?' + + r'([a-zA-Z_]\w*)', + bygroups(Keyword.Type, None, None, None, Text.WhiteSpace, Keyword, + None, Name.Variable)), + (r'\b(enum|event|function|struct)(\s+)([a-zA-Z_]\w*)', + bygroups(Keyword.Type, Text.WhiteSpace, Name.Variable)), + (r'\b(msg|block|tx)\.([A-Za-z_][A-Za-z0-9_]*)\b', Keyword), + (words(( + 'block', 'break', 'constant', 'constructor', 'continue', + 'contract', 'do', 'else', 'external', 'false', 'for', + 'function', 'if', 'import', 'inherited', 'internal', 'is', + 'library', 'mapping', 'memory', 'modifier', 'msg', 'new', + 'payable', 'private', 'public', 'require', 'return', + 'returns', 'struct', 'suicide', 'throw', 'this', 'true', + 'tx', 'var', 'while'), prefix=r'\b', suffix=r'\b'), + Keyword.Type), + (words(('keccak256',), prefix=r'\b', suffix=r'\b'), Name.Builtin), + (datatype, Keyword.Type), + include('constants'), + (r'[a-zA-Z_]\w*', Text), + (r'[!<=>+*/-]', Operator), + (r'[.;:{}(),\[\]]', Punctuation) + ], + 'comments': [ + (r'//(\n|[\w\W]*?[^\\]\n)', Comment.Single), + (r'/(\\\n)?[*][\w\W]*?[*](\\\n)?/', Comment.Multiline), + (r'/(\\\n)?[*][\w\W]*', Comment.Multiline) + ], + 'constants': [ + (r'("([\\]"|.)*?")', String.Double), + (r"('([\\]'|.)*?')", String.Single), + (r'\b0[xX][0-9a-fA-F]+\b', Number.Hex), + (r'\b\d+\b', Number.Decimal), + ], + 'pragma': [ + include('whitespace'), + include('comments'), + (r'(\^|>=|<)(\s*)(\d+\.\d+\.\d+)', + bygroups(Operator, Text.WhiteSpace, Keyword)), + (r';', Punctuation, '#pop') + ], + 'whitespace': [ + (r'\s+', Text.WhiteSpace), + (r'\n', Text.WhiteSpace) + ] + } diff --git a/pygments/lexers/special.py b/pygments/lexers/special.py index 1b3e972..4016c59 100644 --- a/pygments/lexers/special.py +++ b/pygments/lexers/special.py @@ -35,6 +35,7 @@ class TextLexer(Lexer): def analyse_text(text): return TextLexer.priority + _ttype_cache = {} line_re = re.compile(b'.*?\n') diff --git a/pygments/lexers/sql.py b/pygments/lexers/sql.py index d789052..0788cd9 100644 --- a/pygments/lexers/sql.py +++ b/pygments/lexers/sql.py @@ -212,7 +212,7 @@ class PlPgsqlLexer(PostgresBase, RegexLexer): mimetypes = ['text/x-plpgsql'] flags = re.IGNORECASE - tokens = dict((k, l[:]) for (k, l) in iteritems(PostgresLexer.tokens)) + tokens = {k: l[:] for (k, l) in iteritems(PostgresLexer.tokens)} # extend the keywords list for i, pattern in enumerate(tokens['root']): @@ -246,7 +246,7 @@ class PsqlRegexLexer(PostgresBase, RegexLexer): aliases = [] # not public flags = re.IGNORECASE - tokens = dict((k, l[:]) for (k, l) in iteritems(PostgresLexer.tokens)) + tokens = {k: l[:] for (k, l) in iteritems(PostgresLexer.tokens)} tokens['root'].append( (r'\\[^\s]+', Keyword.Pseudo, 'psql-command')) @@ -547,7 +547,7 @@ class TransactSqlLexer(RegexLexer): rating = 1.0 else: name_between_backtick_count = len( - name_between_backtick_re.findall((text))) + name_between_backtick_re.findall(text)) name_between_bracket_count = len( 
name_between_bracket_re.findall(text)) # We need to check if there are any names using @@ -643,7 +643,7 @@ class MySqlLexer(RegexLexer): def analyse_text(text): rating = 0 name_between_backtick_count = len( - name_between_backtick_re.findall((text))) + name_between_backtick_re.findall(text)) name_between_bracket_count = len( name_between_bracket_re.findall(text)) # Same logic as above in the TSQL analysis diff --git a/pygments/lexers/templates.py b/pygments/lexers/templates.py index 8c42363..f891242 100644 --- a/pygments/lexers/templates.py +++ b/pygments/lexers/templates.py @@ -226,7 +226,7 @@ class VelocityLexer(RegexLexer): 'directiveparams'), (r'(#\{?)(' + identifier + r')(\}|\b)', bygroups(Comment.Preproc, Name.Function, Comment.Preproc)), - (r'\$\{?', Punctuation, 'variable') + (r'\$!?\{?', Punctuation, 'variable') ], 'variable': [ (identifier, Name.Variable), @@ -249,7 +249,7 @@ class VelocityLexer(RegexLexer): (r'\]', Operator, '#pop') ], 'funcparams': [ - (r'\$\{?', Punctuation, 'variable'), + (r'\$!?\{?', Punctuation, 'variable'), (r'\s+', Text), (r'[,:]', Punctuation), (r'"(\\\\|\\"|[^"])*"', String.Double), @@ -274,7 +274,7 @@ class VelocityLexer(RegexLexer): rv += 0.15 if re.search(r'#\{?foreach\}?\(.+?\).*?#\{?end\}?', text): rv += 0.15 - if re.search(r'\$\{?[a-zA-Z_]\w*(\([^)]*\))?' + if re.search(r'\$!?\{?[a-zA-Z_]\w*(\([^)]*\))?' r'(\.\w+(\([^)]*\))?)*\}?', text): rv += 0.01 return rv @@ -1802,27 +1802,26 @@ class HandlebarsLexer(RegexLexer): 'root': [ (r'[^{]+', Other), + # Comment start {{! }} or {{!-- (r'\{\{!.*\}\}', Comment), + # HTML Escaping open {{{expression (r'(\{\{\{)(\s*)', bygroups(Comment.Special, Text), 'tag'), + + # {{blockOpen {{#blockOpen {{/blockClose with optional tilde ~ + (r'(\{\{)([#~/]+)([^\s}]*)', bygroups(Comment.Preproc, Number.Attribute,Number.Attribute), 'tag'), (r'(\{\{)(\s*)', bygroups(Comment.Preproc, Text), 'tag'), ], 'tag': [ (r'\s+', Text), + # HTML Escaping close }}} (r'\}\}\}', Comment.Special, '#pop'), - (r'\}\}', Comment.Preproc, '#pop'), - - # Handlebars - (r'([#/]*)(each|if|unless|else|with|log|in(line)?)', bygroups(Keyword, - Keyword)), - (r'#\*inline', Keyword), - - # General {{#block}} - (r'([#/])([\w-]+)', bygroups(Name.Function, Name.Function)), + # blockClose}}, includes optional tilde ~ + (r'(~?)(\}\})', bygroups(Number, Comment.Preproc), '#pop'), # {{opt=something}} - (r'([\w-]+)(=)', bygroups(Name.Attribute, Operator)), + (r'([^\s}]+)(=)', bygroups(Name.Attribute, Operator)), # Partials {{> ...}} (r'(>)(\s*)(@partial-block)', bygroups(Keyword, Text, Keyword)), @@ -1845,7 +1844,7 @@ class HandlebarsLexer(RegexLexer): include('generic'), ], 'variable': [ - (r'[a-zA-Z][\w-]*', Name.Variable), + (r'[()/@a-zA-Z][\w-]*', Name.Variable), (r'\.[\w-]+', Name.Variable), (r'(this\/|\.\/|(\.\.\/)+)[\w-]+', Name.Variable), ], diff --git a/pygments/lexers/teraterm.py b/pygments/lexers/teraterm.py index 0fd1778..1d7483d 100644 --- a/pygments/lexers/teraterm.py +++ b/pygments/lexers/teraterm.py @@ -154,5 +154,5 @@ class TeraTermLexer(RegexLexer): def analyse_text(text): result = 0.0 if re.search(TeraTermLexer.tokens['commands'][0][0], text): - result += 0.60 + result += 0.01 return result diff --git a/pygments/lexers/textfmts.py b/pygments/lexers/textfmts.py index a3aed0c..d3a191b 100644 --- a/pygments/lexers/textfmts.py +++ b/pygments/lexers/textfmts.py @@ -11,12 +11,14 @@ import re -from pygments.lexer import RegexLexer, bygroups +from pygments.lexers import guess_lexer, get_lexer_by_name +from pygments.lexer import RegexLexer, bygroups, 
default, do_insertions from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ - Number, Generic, Literal + Number, Generic, Literal, Punctuation from pygments.util import ClassNotFound -__all__ = ['IrcLogsLexer', 'TodotxtLexer', 'HttpLexer', 'GettextLexer'] +__all__ = ['IrcLogsLexer', 'TodotxtLexer', 'HttpLexer', 'GettextLexer', + 'NotmuchLexer'] class IrcLogsLexer(RegexLexer): @@ -295,3 +297,86 @@ class TodotxtLexer(RegexLexer): (r'\s+', IncompleteTaskText), ], } + + +class NotmuchLexer(RegexLexer): + """ + For `Notmuch `_ email text format. + + .. versionadded:: 2.5 + + Additional options accepted: + + `body_lexer` + If given, highlight the contents of the message body with the specified + lexer, else guess it according to the body content (default: ``None``). + """ + + name = 'Notmuch' + aliases = ['notmuch'] + + def _highlight_code(self, match): + code = match.group(1) + + try: + if self.body_lexer: + lexer = get_lexer_by_name(self.body_lexer) + else: + lexer = guess_lexer(code.strip()) + except ClassNotFound: + lexer = get_lexer_by_name('text') + + for item in lexer.get_tokens_unprocessed(code): + yield item + + tokens = { + 'root': [ + (r'\fmessage{\s*', Keyword, ('message', 'message-attr')), + ], + 'message-attr': [ + (r'(\s*id:\s*)([^\s]+)', bygroups(Name.Attribute, String)), + (r'(\s*(?:depth|match|excluded):\s*)(\d+)', + bygroups(Name.Attribute, Number.Integer)), + (r'(\s*filename:\s*)(.+\n)', + bygroups(Name.Attribute, String)), + default('#pop'), + ], + 'message': [ + (r'\fmessage}\n', Keyword, '#pop'), + (r'\fheader{\n', Keyword, 'header'), + (r'\fbody{\n', Keyword, 'body'), + ], + 'header': [ + (r'\fheader}\n', Keyword, '#pop'), + (r'((?:Subject|From|To|Cc|Date):\s*)(.*\n)', + bygroups(Name.Attribute, String)), + (r'(.*)(\s*\(.*\))(\s*\(.*\)\n)', + bygroups(Generic.Strong, Literal, Name.Tag)), + ], + 'body': [ + (r'\fpart{\n', Keyword, 'part'), + (r'\f(part|attachment){\s*', Keyword, ('part', 'part-attr')), + (r'\fbody}\n', Keyword, '#pop'), + ], + 'part-attr': [ + (r'(ID:\s*)(\d+)', bygroups(Name.Attribute, Number.Integer)), + (r'(,\s*)((?:Filename|Content-id):\s*)([^,]+)', + bygroups(Punctuation, Name.Attribute, String)), + (r'(,\s*)(Content-type:\s*)(.+\n)', + bygroups(Punctuation, Name.Attribute, String)), + default('#pop'), + ], + 'part': [ + (r'\f(?:part|attachment)}\n', Keyword, '#pop'), + (r'\f(?:part|attachment){\s*', Keyword, ('#push', 'part-attr')), + (r'^Non-text part: .*\n', Comment), + (r'(?s)(.*?(?=\f(?:part|attachment)}\n))', _highlight_code), + ], + } + + def analyse_text(text): + return 1.0 if text.startswith('\fmessage{') else 0.0 + + def __init__(self, **options): + self.body_lexer = options.get('body_lexer', None) + RegexLexer.__init__(self, **options) diff --git a/pygments/lexers/typoscript.py b/pygments/lexers/typoscript.py index 3d08cef..745292b 100644 --- a/pygments/lexers/typoscript.py +++ b/pygments/lexers/typoscript.py @@ -113,9 +113,6 @@ class TypoScriptLexer(RegexLexer): flags = re.DOTALL | re.MULTILINE - # Slightly higher than TypeScript (which is 0). - priority = 0.0 - tokens = { 'root': [ include('comment'), diff --git a/pygments/lexers/zig.py b/pygments/lexers/zig.py new file mode 100644 index 0000000..7850fdf --- /dev/null +++ b/pygments/lexers/zig.py @@ -0,0 +1,126 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.zig + ~~~~~~~~~~~~~~~~~~~~ + + Lexers for Zig. + + :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +import re +from pygments.lexer import RegexLexer, bygroups, include, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Error, Whitespace + +__all__ = ['ZigLexer'] + +class ZigLexer(RegexLexer): + """ + For `Zig `_ source code. + + grammar: https://ziglang.org/documentation/master/#Grammar + """ + name = 'Zig' + aliases = ['zig'] + filenames = ['*.zig'] + mimetypes = ['text/zig'] + + type_keywords = ( + words(('bool', 'f16', 'f32', 'f64', 'f128', 'void', 'noreturn', 'type', 'anyerror', 'promise', + 'i0', 'u0', 'isize', 'usize', 'comptime_int', 'comptime_float', + 'c_short', 'c_ushort', 'c_int', 'c_uint', 'c_long', 'c_ulong', 'c_longlong', 'c_ulonglong', 'c_longdouble', 'c_void' + 'i8', 'u8', 'i16', 'u16', 'i32', 'u32', 'i64', 'u64', 'i128', 'u128' + ), suffix=r'\b'), + Keyword.Type) + + storage_keywords = ( + words(('const', 'var', 'extern', 'packed', 'export', 'pub', 'noalias', + 'inline', 'comptime', 'nakedcc', 'stdcallcc', 'volatile', 'allowzero', + 'align', 'linksection', 'threadlocal'), suffix=r'\b'), + Keyword.Reserved) + + structure_keywords = ( + words(('struct', 'enum', 'union', 'error'), suffix=r'\b'), + Keyword) + + statement_keywords = ( + words(('break', 'return', 'continue', 'asm', 'defer', 'errdefer', + 'unreachable', 'try', 'catch', 'async', 'await', 'suspend', + 'resume', 'cancel'), suffix=r'\b'), + Keyword) + + conditional_keywords = ( + words(('if', 'else', 'switch', 'and', 'or', 'orelse'), suffix=r'\b'), + Keyword) + + repeat_keywords = ( + words(('while', 'for'), suffix=r'\b'), + Keyword) + + other_keywords = ( + words(('fn', 'usingnamespace', 'test'), suffix=r'\b'), + Keyword) + + constant_keywords = ( + words(('true', 'false', 'null', 'undefined'), suffix=r'\b'), + Keyword.Constant) + + tokens = { + 'root': [ + (r'\n', Whitespace), + (r'\s+', Whitespace), + (r'//.*?\n', Comment.Single), + + # Keywords + statement_keywords, + storage_keywords, + structure_keywords, + repeat_keywords, + type_keywords, + constant_keywords, + conditional_keywords, + other_keywords, + + # Floats + (r'0x[0-9a-fA-F]+\.[0-9a-fA-F]+([pP][\-+]?[0-9a-fA-F]+)?', Number.Float), + (r'0x[0-9a-fA-F]+\.?[pP][\-+]?[0-9a-fA-F]+', Number.Float), + (r'[0-9]+\.[0-9]+([eE][-+]?[0-9]+)?', Number.Float), + (r'[0-9]+\.?[eE][-+]?[0-9]+', Number.Float), + + # Integers + (r'0b[01]+', Number.Bin), + (r'0o[0-7]+', Number.Oct), + (r'0x[0-9a-fA-F]+', Number.Hex), + (r'[0-9]+', Number.Integer), + + # Identifier + (r'@[a-zA-Z_]\w*',Name.Builtin), + (r'[a-zA-Z_]\w*', Name), + + # Characters + (r'\'\\\'\'', String.Escape), + (r'\'\\(|x[a-fA-F0-9]{2}|u[a-fA-F0-9]{4}|U[a-fA-F0-9]{6}|[nr\\t\'"])\'', String.Escape), + (r'\'[^\\\']\'', String), + + # Strings + (r'\\\\[^\n]*', String.Heredoc), + (r'c\\\\[^\n]*', String.Heredoc), + (r'c?"',String, 'string'), + + # Operators, Punctuation + (r'[+%=><|^!?/\-*&~:]', Operator), + (r'[{}()\[\],.;]', Punctuation) + ], + 'string': [ + (r'\\(x[a-fA-F0-9]{2}|u[a-fA-F0-9]{4}|U[a-fA-F0-9]{6}|[nr\\t\'"])', String.Escape), + (r'[^\\"\n]+', String), + (r'"', String, '#pop') + ] + } + + def get_tokens_unprocessed(self, text): + for index, token, value in \ + RegexLexer.get_tokens_unprocessed(self, text): + yield index, token, value diff --git a/pygments/styles/__init__.py b/pygments/styles/__init__.py index 8de9e43..c061471 100644 --- a/pygments/styles/__init__.py +++ b/pygments/styles/__init__.py @@ -50,6 +50,7 @@ STYLE_MAP = { 'stata': 'stata_light::StataLightStyle', 'stata-light': 'stata_light::StataLightStyle', 'stata-dark': 
'stata_dark::StataDarkStyle', + 'inkpot': 'inkpot::InkPotStyle', } diff --git a/pygments/styles/inkpot.py b/pygments/styles/inkpot.py new file mode 100644 index 0000000..a030b8b --- /dev/null +++ b/pygments/styles/inkpot.py @@ -0,0 +1,68 @@ +# -*- coding: utf-8 -*- +""" + pygments.styles.inkpot + ~~~~~~~~~~~~~~~~~~~~~~ + + A highlighting style for Pygments, inspired by the Inkpot theme for VIM. + + :copyright: Copyright 2018 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.style import Style +from pygments.token import Text, Other, \ + Keyword, Name, Comment, String, Error, \ + Number, Operator, Generic, Whitespace, Punctuation + + +class InkPotStyle(Style): + background_color = "#1e1e27" + default_style = "" + styles = { + Text: "#cfbfad", + Other: "#cfbfad", + Whitespace: "#434357", + Comment: "#cd8b00", + Comment.Preproc: "#409090", + Comment.PreprocFile: "bg:#404040 #ffcd8b", + Comment.Special: "#808bed", + + Keyword: "#808bed", + Keyword.Pseudo: "nobold", + Keyword.Type: "#ff8bff", + + Operator: "#666666", + + Punctuation: "#cfbfad", + + Name: "#cfbfad", + Name.Attribute: "#cfbfad", + Name.Builtin.Pseudo: '#ffff00', + Name.Builtin: "#808bed", + Name.Class: "#ff8bff", + Name.Constant: "#409090", + Name.Decorator: "#409090", + Name.Exception: "#ff0000", + Name.Function: "#c080d0", + Name.Label: "#808bed", + Name.Namespace: "#ff0000", + Name.Variable: "#cfbfad", + + String: "bg:#404040 #ffcd8b", + String.Doc: "#808bed", + + Number: "#f0ad6d", + + Generic.Heading: "bold #000080", + Generic.Subheading: "bold #800080", + Generic.Deleted: "#A00000", + Generic.Inserted: "#00A000", + Generic.Error: "#FF0000", + Generic.Emph: "italic", + Generic.Strong: "bold", + Generic.Prompt: "bold #000080", + Generic.Output: "#888", + Generic.Traceback: "#04D", + + Error: "bg:#6e2e2e #ffffff" + } diff --git a/pygments/styles/monokai.py b/pygments/styles/monokai.py index 4580df2..c9db9f2 100644 --- a/pygments/styles/monokai.py +++ b/pygments/styles/monokai.py @@ -92,14 +92,15 @@ class MonokaiStyle(Style): String.Single: "", # class: 's1' String.Symbol: "", # class: 'ss' + Generic: "", # class: 'g' Generic.Deleted: "#f92672", # class: 'gd', Generic.Emph: "italic", # class: 'ge' Generic.Error: "", # class: 'gr' Generic.Heading: "", # class: 'gh' Generic.Inserted: "#a6e22e", # class: 'gi' - Generic.Output: "", # class: 'go' - Generic.Prompt: "", # class: 'gp' + Generic.Output: "#66d9ef", # class: 'go' + Generic.Prompt: "bold #f92672", # class: 'gp' Generic.Strong: "bold", # class: 'gs' Generic.Subheading: "#75715e", # class: 'gu' Generic.Traceback: "", # class: 'gt' diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 4754a9d..0000000 --- a/requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -coverage -nose -pyflakes -pylint -tox diff --git a/scripts/.release-checklist.swp b/scripts/.release-checklist.swp deleted file mode 100644 index 2ec1a56..0000000 Binary files a/scripts/.release-checklist.swp and /dev/null differ diff --git a/scripts/release-checklist b/scripts/release-checklist index efc1e1e..f1bd0f3 100644 --- a/scripts/release-checklist +++ b/scripts/release-checklist @@ -1,24 +1,24 @@ Release checklist ================= -* Check ``hg status`` +* Check ``git status`` * ``make check`` * LATER when configured properly: ``make pylint`` * ``tox`` * Update version info in ``setup.py/__init__.py`` * Check setup.py metadata: long description, trove classifiers * Update release date/code name in ``CHANGES`` -* ``hg commit`` +* ``git 
commit`` * ``make clean`` * ``python2 setup.py release bdist_wheel`` * ``python3 setup.py release bdist_wheel sdist`` * ``twine upload dist/Pygments-$NEWVER*`` * Check PyPI release page for obvious errors -* ``hg tag`` +* ``git tag -a`` * Merge default into stable if this was a ``x.y.0`` * Update homepage (release info), regenerate docs (+printable!) * Add new version/milestone to tracker categories * Write announcement and send to mailing list/python-announce * Update version info, add new ``CHANGES`` entry for next version -* ``hg commit`` -* ``hg push`` +* ``git commit`` +* ``git push`` diff --git a/setup.py b/setup.py index e3d43fc..61f1c7f 100755 --- a/setup.py +++ b/setup.py @@ -21,35 +21,11 @@ are: :license: BSD, see LICENSE for details. """ -try: - from setuptools import setup, find_packages - have_setuptools = True -except ImportError: - from distutils.core import setup - def find_packages(*args, **kwargs): - return [ - 'pygments', - 'pygments.lexers', - 'pygments.formatters', - 'pygments.styles', - 'pygments.filters', - ] - have_setuptools = False - -if have_setuptools: - add_keywords = dict( - entry_points = { - 'console_scripts': ['pygmentize = pygments.cmdline:main'], - }, - ) -else: - add_keywords = dict( - scripts = ['pygmentize'], - ) +from setuptools import setup, find_packages setup( name = 'Pygments', - version = '2.4.2', + version = '2.5.1', url = 'http://pygments.org/', license = 'BSD License', author = 'Georg Brandl', @@ -57,7 +33,10 @@ setup( description = 'Pygments is a syntax highlighting package written in Python.', long_description = __doc__, keywords = 'syntax highlighting', - packages = find_packages(), + packages = find_packages(include=['pygments']), + entry_points = { + 'console_scripts': ['pygmentize = pygments.cmdline:main'], + }, platforms = 'any', zip_safe = False, include_package_data = True, @@ -75,9 +54,11 @@ setup( 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: Implementation :: CPython', + 'Programming Language :: Python :: Implementation :: PyPy', 'Operating System :: OS Independent', 'Topic :: Text Processing :: Filters', 'Topic :: Utilities', ], - **add_keywords ) diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/examplefiles/MIME_example.eml b/tests/examplefiles/MIME_example.eml new file mode 100644 index 0000000..e25ef5a --- /dev/null +++ b/tests/examplefiles/MIME_example.eml @@ -0,0 +1,34 @@ +From: Some One +MIME-Version: 1.0 +Content-Type: multipart/mixed; + boundary="+Testboundary text" + +This is a multipart message in MIME format. + +--+Testboundary text +Content-Type: multipart/alternative; + boundary="hello, boundary" + +--hello, boundary +Content-Type: text/plain + +this is the body text + +--hello, boundary +Content-Type: text/html; + charset="utf-8" +Content-Transfer-Encoding: quoted-printable + +This is the body text + +--hello, boundary-- +--+Testboundary text +Content-Type: text/plain; +Content-Disposition: attachment; + filename="test.txt" +Content-Transfer-Encoding: base64 + +dGhpcyBpcyB0aGUgYXR0YWNobWVudCB0ZXh0 + +--+Testboundary text-- +Some additional content here. 
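The new ``*.eml`` fixtures above and below exercise the email support added
in this release. A minimal sketch of tokenizing the MIME example by hand --
assuming only that the new email lexer registers the ``*.eml`` filename
pattern, which the example-file tests rely on::

    from pygments.lexers import get_lexer_for_filename

    with open('tests/examplefiles/MIME_example.eml') as f:
        text = f.read()

    # filename-based lookup resolves '*.eml' to the new email lexer
    lexer = get_lexer_for_filename('MIME_example.eml')
    for index, token, value in lexer.get_tokens_unprocessed(text):
        print(index, token, repr(value))
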
diff --git a/tests/examplefiles/example.eml b/tests/examplefiles/example.eml new file mode 100644 index 0000000..e25427f --- /dev/null +++ b/tests/examplefiles/example.eml @@ -0,0 +1,92 @@ +Mime-Version: 1.0 (Apple Message framework v730) +Content-Type: multipart/mixed; boundary=Apple-Mail-13-196941151 +Message-Id: <9169D984-4E0B-45EF-82D4-8F5E53AD7012@example.com> +From: foo@example.com +Subject: testing +Date: Mon, 6 Jun 2005 22:21:22 +0200 +To: blah@example.com + + +--Apple-Mail-13-196941151 +Content-Transfer-Encoding: quoted-printable +Content-Type: text/plain; + charset=ISO-8859-1; + delsp=yes; + format=flowed + +This is the first part. + +--Apple-Mail-13-196941151 +Content-Type: message/rfc822; + name="ForwardedMessage.eml"; + +Return-Path: +X-Original-To: xxxx@xxxx.com +Delivered-To: xxxx@xxxx.com +Received: from localhost (localhost [127.0.0.1]) + by xxx.xxxxx.com (Postfix) with ESMTP id 50FD3A96F + for ; Tue, 10 May 2005 17:26:50 +0000 (GMT) +Received: from xxx.xxxxx.com ([127.0.0.1]) + by localhost (xxx.xxxxx.com [127.0.0.1]) (amavisd-new, port 10024) + with LMTP id 70060-03 for ; + Tue, 10 May 2005 17:26:49 +0000 (GMT) +Received: from xxx.xxxxx.com (xxx.xxxxx.com [69.36.39.150]) + by xxx.xxxxx.com (Postfix) with ESMTP id 8B957A94B + for ; Tue, 10 May 2005 17:26:48 +0000 (GMT) +Received: from xxx.xxxxx.com (xxx.xxxxx.com [64.233.184.203]) + by xxx.xxxxx.com (Postfix) with ESMTP id 9972514824C + for ; Tue, 10 May 2005 12:26:40 -0500 (CDT) +Received: by xxx.xxxxx.com with SMTP id 68so1694448wri + for ; Tue, 10 May 2005 10:26:40 -0700 (PDT) +DomainKey-Signature: a=rsa-sha1; q=dns; c=nofws; + s=beta; d=xxxxx.com; + h=received:message-id:date:from:reply-to:to:subject:mime-version:content-type; + b=g8ZO5ttS6GPEMAz9WxrRk9+9IXBUfQIYsZLL6T88+ECbsXqGIgfGtzJJFn6o9CE3/HMrrIGkN5AisxVFTGXWxWci5YA/7PTVWwPOhJff5BRYQDVNgRKqMl/SMttNrrRElsGJjnD1UyQ/5kQmcBxq2PuZI5Zc47u6CILcuoBcM+A= +Received: by 10.54.96.19 with SMTP id t19mr621017wrb; + Tue, 10 May 2005 10:26:39 -0700 (PDT) +Received: by 10.54.110.5 with HTTP; Tue, 10 May 2005 10:26:39 -0700 (PDT) +Message-ID: +Date: Tue, 10 May 2005 11:26:39 -0600 +From: Test Tester +Reply-To: Test Tester +To: xxxx@xxxx.com, xxxx@xxxx.com +Subject: Another PDF +Mime-Version: 1.0 +Content-Type: multipart/mixed; + boundary="----=_Part_2192_32400445.1115745999735" +X-Virus-Scanned: amavisd-new at textdrive.com + +------=_Part_2192_32400445.1115745999735 +Content-Type: text/plain; charset=ISO-8859-1 +Content-Transfer-Encoding: quoted-printable +Content-Disposition: inline + +Just attaching another PDF, here, to see what the message looks like, +and to see if I can figure out what is going wrong here. 
+ +------=_Part_2192_32400445.1115745999735 +Content-Type: application/pdf; name="broken.pdf" +Content-Transfer-Encoding: base64 +Content-Disposition: attachment; filename="broken.pdf" + +JVBERi0xLjQNCiXk9tzfDQoxIDAgb2JqDQo8PCAvTGVuZ3RoIDIgMCBSDQogICAvRmlsdGVyIC9G +bGF0ZURlY29kZQ0KPj4NCnN0cmVhbQ0KeJy9Wt2KJbkNvm/od6jrhZxYln9hWEh2p+8HBvICySaE +ycLuTV4/1ifJ9qnq09NpSBimu76yLUuy/qzqcPz7+em3Ixx/CDc6CsXxs3b5+fvfjr/8cPz6/BRu +rbfAx/n3739/fuJylJ5u5fjX81OuDr4deK4Bz3z/aDP+8fz0yw8g0Ofq7ktr1Mn+u28rvhy/jVeD +QSa+9YNKHP/pxjvDNfVAx/m3MFz54FhvTbaseaxiDoN2LeMVMw+yA7RbHSCDzxZuaYB2E1Yay7QU +x89vz0+tyFDKMlAHK5yqLmnjF+c4RjEiQIUeKwblXMe+AsZjN1J5yGQL5DHpDHksurM81rF6PKab +gK6zAarIDzIiUY23rJsN9iorAE816aIu6lsgAdQFsuhhkHOUFgVjp2GjMqSewITXNQ27jrMeamkg +1rPI3iLWG2CIaSBB+V1245YVRICGbbpYKHc2USFDl6M09acQVQYhlwIrkBNLISvXhGlF1wi5FHCw +wxZkoGNJlVeJCEsqKA+3YAV5AMb6KkeaqEJQmFKKQU8T1pRi2ihE1Y4CDrqoYFFXYjJJOatsyzuI +8SIlykuxKTMibWK8H1PgEvqYgs4GmQSrEjJAalgGirIhik+p4ZQN9E3ETFPAHE1b8pp1l/0Rc1gl +fQs0ABWvyoZZzU8VnPXwVVcO9BEsyjEJaO6eBoZRyKGlrKoYoOygA8BGIzgwN3RQ15ouigG5idZQ +fx2U4Db2CqiLO0WHAZoylGiCAqhniNQjFjQPSkmjwfNTgQ6M1Ih+eWo36wFmjIxDJZiGUBiWsAyR +xX3EekGOizkGI96Ol9zVZTAivikURhRsHh2E3JhWMpSTZCnnonrLhMCodgrNcgo4uyJUJc6qnVss +nrGd1Ptr0YwisCOYyIbUwVjV4xBUNLbguSO2YHujonAMJkMdSI7bIw91Akq2AUlMUWGFTMAOamjU +OvZQCxIkY2pCpMFo/IwLdVLHs6nddwTRrgoVbvLU9eB0G4EMndV0TNoxHbt3JBWwK6hhv3iHfDtF +yokB302IpEBTnWICde4uYc/1khDbSIkQopO6lcqamGBu1OSE3N5IPSsZX00CkSHRiiyx6HQIShsS +HSVNswdVsaOUSAWq9aYhDtGDaoG5a3lBGkYt/lFlBFt1UqrYnzVtUpUQnLiZeouKgf1KhRBViRRk +ExepJCzTwEmFDalIRbLEGtw0gfpESOpIAF/NnpPzcVCG86s0g2DuSyd41uhNGbEgaSrWEXORErbw +------=_Part_2192_32400445.1115745999735-- + +--Apple-Mail-13-196941151-- diff --git a/tests/examplefiles/example.praat b/tests/examplefiles/example.praat index 8557391..2b782b8 100644 --- a/tests/examplefiles/example.praat +++ b/tests/examplefiles/example.praat @@ -1,4 +1,5 @@ form Highlighter test + # This is a regular comment sentence Blank sentence My_sentence This should all be a string text My_text This should also all be a string @@ -7,9 +8,11 @@ form Highlighter test boolean Text no boolean Quoted "yes" comment This should be a string - optionmenu Choice: 1 + optionmenu Drop-down: 1 + option Foo + option 100 + choice Radio: 1 option Foo - option Bar option 100 real left_Range -123.6 positive right_Range_max 3.3 @@ -17,6 +20,25 @@ form Highlighter test natural Nat 4 endform +beginPause: "Highlighter test" + sentence: "Blank", "" + sentence: "My sentence", "This should all be a string" + text: "My text", "This should also all be a string" + word: "My word", "Only the first word is a string, the rest is discarded" + boolean: "Binary", 1 + comment: "This should be a string" + optionMenu: "Drop-down", 1 + option: "Foo" + option: "100" + choice: "Choice", 1 + option: "Foo" + option: "100" + real: "left Range", -123.6 + positive: "right Range max", 3.3 + integer: "Int", 4 + natural: "Nat", 4 +button = endPause("Cancel", "OK", 1, 2) + # Periods do not establish boundaries for keywords form.var = 10 # Or operators @@ -30,8 +52,7 @@ execute /path/to/file # Predefined variables a = praatVersion -a = e -a = pi +a = e + pi * ( all+right) / left mod average + (mono - stereo) a$ = homeDirectory$ + tab$ + newline$ a$ = temporaryDirectory$ a$ = praatVersion$ @@ -40,6 +61,9 @@ a$ = homeDirectory$ a$ = preferencesDirectory$ a$ = defaultDirectory$ nocheck selectObject: undefined +# Not predefined variables +a$ = e$ +a$ = pi$ # Arrays are not comments a# = zero# (5, 6) @@ -59,9 +83,43 @@ else macintosh == 1 exit We are on Mac endif -string$ 
= "Strings can be 'interpolated'" +# Interpolation with precision digits +echo unquoted 'a:3' +echo unquoted 'a.a:3' +echo unquoted 'a[1]:3' +echo unquoted 'a1:3' + +appendInfoLine: "quoted 'a:3'" +appendInfoLine: "quoted 'a.a:3'" +appendInfoLine: "quoted 'a[1]:3'" +appendInfoLine: "quoted 'a1:3'" + +# Interpolations are not recursive +echo unquoted 'a'1':3' +appendInfoLine: "quoted 'a'1':3'" + +# Interpolation without precision digits +echo unquoted 'var' numeric +echo unquoted 'var$' string +echo unquoted 'var["a"]' numeric hash +echo unquoted 'var$["a"]' string hash +echo unquoted 'var[1]' numeric indexed variable +echo unquoted 'var$[1]' string indexed variable + +appendInfoLine: "quoted 'var' numeric" +appendInfoLine: "quoted 'var$' string" +appendInfoLine: "quoted 'var["a"]' numeric hash" +appendInfoLine: "quoted 'var$["a"]' string hash" +appendInfoLine: "quoted 'var[1]' numeric indexed variable" +appendInfoLine: "quoted 'var$[1]' string indexed variable" + +# Indeces in interpolations must be literal +echo 'var[a]' +echo 'var[a$]' + string$ = "But don't interpolate everything!" -string$(10) +string$ = "interpolatin' " + "across" + " strings ain't cool either" +string$(10) ; This is a function repeat string$ = string$ - right$(string$) @@ -77,6 +135,12 @@ value$ = Table_'table'$[25, "f0"] fixed = Sound_10.xmin fixed = Object_foo.xmin fixed = Procrustes_foo.nx +var["vaa"] = 1 ; Hash + +# Special two-word keyword +select all +# Keyword with a predefined variable +select all # old-style procedure call call oldStyle "quoted" 2 unquoted string @@ -103,7 +167,7 @@ endfor i = 1 while i < n - i++ + i += 1 # Different styles of object selection select sound'i' sound = selected() @@ -153,7 +217,7 @@ while i < n ..."duration response" # Function call with trailing space - removeObject: pitch, table + removeObject: pitch, table # Picture window commands selectObject: sound @@ -251,7 +315,7 @@ procedure newStyle (.str1$, .num, .str2$) .local = Get total duration .local = Get 'some' duration .local = Get 'some[1]' value... 
hello 10 p[i] - .local = Get 'some[1,3]' value: "hello", 10, 'p[i]' + .local = Get 'some[1,3]' value: "hello", 10, p[i] .local = Get 'some$' duration .local = Get 'some$[1]' duration endproc diff --git a/tests/examplefiles/example.shex b/tests/examplefiles/example.shex new file mode 100644 index 0000000..8fab2c8 --- /dev/null +++ b/tests/examplefiles/example.shex @@ -0,0 +1,20 @@ +PREFIX rdf: +PREFIX rdfs: +PREFIX schema: +PREFIX skos: +PREFIX xsd: +PREFIX ex: + +ex:Lexer { + rdfs:label xsd:string; + skos:altLabel xsd:string*; + ex:filenames xsd:string+; + ex:mimetypes xsd:string+; + ex:priority xsd:decimal MinInclusive 0.0 MaxExclusive 1.0; # seems to be the de facto range of currently defined priorities + ex:lexes @ex:Language*; +} + +ex:Language { + schema:description rdf:langString*; + schema:url IRI?; +} diff --git a/tests/examplefiles/example.zig b/tests/examplefiles/example.zig new file mode 100644 index 0000000..32e7284 --- /dev/null +++ b/tests/examplefiles/example.zig @@ -0,0 +1,263 @@ +const std = @import("std"); +const Allocator = mem.Allocator; +const mem = std.mem; +const ast = std.zig.ast; +const Visib = @import("visib.zig").Visib; +const event = std.event; +const Value = @import("value.zig").Value; +const Token = std.zig.Token; +const errmsg = @import("errmsg.zig"); +const Scope = @import("scope.zig").Scope; +const Compilation = @import("compilation.zig").Compilation; + +pub const Decl = struct { + id: Id, + name: []const u8, + visib: Visib, + resolution: event.Future(Compilation.BuildError!void), + parent_scope: *Scope, + + // TODO when we destroy the decl, deref the tree scope + tree_scope: *Scope.AstTree, + + pub const Table = std.HashMap([]const u8, *Decl, mem.hash_slice_u8, mem.eql_slice_u8); + + pub fn cast(base: *Decl, comptime T: type) ?*T { + if (base.id != @field(Id, @typeName(T))) return null; + return @fieldParentPtr(T, "base", base); + } + + pub fn isExported(base: *const Decl, tree: *ast.Tree) bool { + switch (base.id) { + Id.Fn => { + const fn_decl = @fieldParentPtr(Fn, "base", base); + return fn_decl.isExported(tree); + }, + else => return false, + } + } + + pub fn getSpan(base: *const Decl) errmsg.Span { + switch (base.id) { + Id.Fn => { + const fn_decl = @fieldParentPtr(Fn, "base", base); + const fn_proto = fn_decl.fn_proto; + const start = fn_proto.fn_token; + const end = fn_proto.name_token orelse start; + return errmsg.Span{ + .first = start, + .last = end + 1, + }; + }, + else => @panic("TODO"), + } + } + + pub fn findRootScope(base: *const Decl) *Scope.Root { + return base.parent_scope.findRoot(); + } + + pub const Id = enum { + Var, + Fn, + CompTime, + }; + + pub const Var = struct { + base: Decl, + }; + + pub const Fn = struct { + base: Decl, + value: Val, + fn_proto: *ast.Node.FnProto, + + // TODO https://github.com/ziglang/zig/issues/683 and then make this anonymous + pub const Val = union(enum) { + Unresolved: void, + Fn: *Value.Fn, + FnProto: *Value.FnProto, + }; + + pub fn externLibName(self: Fn, tree: *ast.Tree) ?[]const u8 { + return if (self.fn_proto.extern_export_inline_token) |tok_index| x: { + const token = tree.tokens.at(tok_index); + break :x switch (token.id) { + Token.Id.Extern => tree.tokenSlicePtr(token), + else => null, + }; + } else null; + } + + pub fn isExported(self: Fn, tree: *ast.Tree) bool { + if (self.fn_proto.extern_export_inline_token) |tok_index| { + const token = tree.tokens.at(tok_index); + return token.id == Token.Id.Keyword_export; + } else { + return false; + } + } + }; + + pub const CompTime = struct { + base: 
Decl, + }; +}; + +pub const info_zen = + \\ + \\ * Communicate intent precisely. + \\ * Edge cases matter. + \\ * Favor reading code over writing code. + \\ * Only one obvious way to do things. + \\ * Runtime crashes are better than bugs. + \\ * Compile errors are better than runtime crashes. + \\ * Incremental improvements. + \\ * Avoid local maximums. + \\ * Reduce the amount one must remember. + \\ * Minimize energy spent on coding style. + \\ * Together we serve end users. + \\ + \\ +; + +fn cmdZen(allocator: *Allocator, args: []const []const u8) !void { + try stdout.write(info_zen); +} + +const usage_internal = + \\usage: zig internal [subcommand] + \\ + \\Sub-Commands: + \\ build-info Print static compiler build-info + \\ + \\ +; + +fn cmdInternal(allocator: *Allocator, args: []const []const u8) !void { + if (args.len == 0) { + try stderr.write(usage_internal); + os.exit(1); + } + + const sub_commands = []Command{Command{ + .name = "build-info", + .exec = cmdInternalBuildInfo, + }}; + + for (sub_commands) |sub_command| { + if (mem.eql(u8, sub_command.name, args[0])) { + try sub_command.exec(allocator, args[1..]); + return; + } + } + + try stderr.print("unknown sub command: {}\n\n", args[0]); + try stderr.write(usage_internal); +} + +fn cmdInternalBuildInfo(allocator: *Allocator, args: []const []const u8) !void { + try stdout.print( + \\ZIG_CMAKE_BINARY_DIR {} + \\ZIG_CXX_COMPILER {} + \\ZIG_LLVM_CONFIG_EXE {} + \\ZIG_LLD_INCLUDE_PATH {} + \\ZIG_LLD_LIBRARIES {} + \\ZIG_STD_FILES {} + \\ZIG_C_HEADER_FILES {} + \\ZIG_DIA_GUIDS_LIB {} + \\ + , + std.cstr.toSliceConst(c.ZIG_CMAKE_BINARY_DIR), + std.cstr.toSliceConst(c.ZIG_CXX_COMPILER), + std.cstr.toSliceConst(c.ZIG_LLVM_CONFIG_EXE), + std.cstr.toSliceConst(c.ZIG_LLD_INCLUDE_PATH), + std.cstr.toSliceConst(c.ZIG_LLD_LIBRARIES), + std.cstr.toSliceConst(c.ZIG_STD_FILES), + std.cstr.toSliceConst(c.ZIG_C_HEADER_FILES), + std.cstr.toSliceConst(c.ZIG_DIA_GUIDS_LIB), + ); +} + +fn test__floatuntisf(a: u128, expected: f32) void { + const x = __floatuntisf(a); + testing.expect(x == expected); +} + +test "floatuntisf" { + test__floatuntisf(0, 0.0); + + test__floatuntisf(1, 1.0); + test__floatuntisf(2, 2.0); + test__floatuntisf(20, 20.0); + + test__floatuntisf(0x7FFFFF8000000000, 0x1.FFFFFEp+62); + test__floatuntisf(0x7FFFFF0000000000, 0x1.FFFFFCp+62); + + test__floatuntisf(make_ti(0x8000008000000000, 0), 0x1.000001p+127); + test__floatuntisf(make_ti(0x8000000000000800, 0), 0x1.0p+127); + test__floatuntisf(make_ti(0x8000010000000000, 0), 0x1.000002p+127); + + test__floatuntisf(make_ti(0x8000000000000000, 0), 0x1.000000p+127); + + test__floatuntisf(0x0007FB72E8000000, 0x1.FEDCBAp+50); + + test__floatuntisf(0x0007FB72EA000000, 0x1.FEDCBA8p+50); + test__floatuntisf(0x0007FB72EB000000, 0x1.FEDCBACp+50); + + test__floatuntisf(0x0007FB72EC000000, 0x1.FEDCBBp+50); + + test__floatuntisf(0x0007FB72E6000000, 0x1.FEDCB98p+50); + test__floatuntisf(0x0007FB72E7000000, 0x1.FEDCB9Cp+50); + test__floatuntisf(0x0007FB72E4000000, 0x1.FEDCB9p+50); + + test__floatuntisf(0xFFFFFFFFFFFFFFFE, 0x1p+64); + test__floatuntisf(0xFFFFFFFFFFFFFFFF, 0x1p+64); + + test__floatuntisf(0x0007FB72E8000000, 0x1.FEDCBAp+50); + + test__floatuntisf(0x0007FB72EA000000, 0x1.FEDCBAp+50); + test__floatuntisf(0x0007FB72EB000000, 0x1.FEDCBAp+50); + test__floatuntisf(0x0007FB72EBFFFFFF, 0x1.FEDCBAp+50); + test__floatuntisf(0x0007FB72EC000000, 0x1.FEDCBCp+50); + test__floatuntisf(0x0007FB72E8000001, 0x1.FEDCBAp+50); + + test__floatuntisf(0x0007FB72E6000000, 0x1.FEDCBAp+50); + 
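    // the remaining cases pin down correct rounding at the edge of f32's
    // 24-bit significand, including 128-bit inputs assembled with make_ti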
test__floatuntisf(0x0007FB72E7000000, 0x1.FEDCBAp+50); + test__floatuntisf(0x0007FB72E7FFFFFF, 0x1.FEDCBAp+50); + test__floatuntisf(0x0007FB72E4000001, 0x1.FEDCBAp+50); + test__floatuntisf(0x0007FB72E4000000, 0x1.FEDCB8p+50); + + test__floatuntisf(make_ti(0x0000000000001FED, 0xCB90000000000001), 0x1.FEDCBAp+76); + test__floatuntisf(make_ti(0x0000000000001FED, 0xCBA0000000000000), 0x1.FEDCBAp+76); + test__floatuntisf(make_ti(0x0000000000001FED, 0xCBAFFFFFFFFFFFFF), 0x1.FEDCBAp+76); + test__floatuntisf(make_ti(0x0000000000001FED, 0xCBB0000000000000), 0x1.FEDCBCp+76); + test__floatuntisf(make_ti(0x0000000000001FED, 0xCBB0000000000001), 0x1.FEDCBCp+76); + test__floatuntisf(make_ti(0x0000000000001FED, 0xCBBFFFFFFFFFFFFF), 0x1.FEDCBCp+76); + test__floatuntisf(make_ti(0x0000000000001FED, 0xCBC0000000000000), 0x1.FEDCBCp+76); + test__floatuntisf(make_ti(0x0000000000001FED, 0xCBC0000000000001), 0x1.FEDCBCp+76); + test__floatuntisf(make_ti(0x0000000000001FED, 0xCBD0000000000000), 0x1.FEDCBCp+76); + test__floatuntisf(make_ti(0x0000000000001FED, 0xCBD0000000000001), 0x1.FEDCBEp+76); + test__floatuntisf(make_ti(0x0000000000001FED, 0xCBDFFFFFFFFFFFFF), 0x1.FEDCBEp+76); + test__floatuntisf(make_ti(0x0000000000001FED, 0xCBE0000000000000), 0x1.FEDCBEp+76); +} + +fn trimStart(slice: []const u8, ch: u8) []const u8 { + var i: usize = 0; + const test_string = "test\"string"; + for (slice) |b| { + if (b == '\xa3') break; + if (b == '\ua3d3') break; + if (b == '\Ua3d3d3') break; + if (b == '\t') break; + if (b == '\n') break; + if (b == '\\') break; + if (b == '\'') break; + if (b == '"') break; + if (b != 'n') break; + if (b != '-') break; + i += 1; + } + + return slice[i..]; +} diff --git a/tests/examplefiles/notmuch_example b/tests/examplefiles/notmuch_example new file mode 100644 index 0000000..61be8c6 --- /dev/null +++ b/tests/examplefiles/notmuch_example @@ -0,0 +1,15 @@ + message{ id:5d0693e2.1c69fb81.d5fc9.1f6e@mx.google.com depth:0 match:1 excluded:0 filename:/home/user/mail/INBOX/new/1560712171_0.11014.blue,U=20254,FMD5=7e33429f656f1e6e9d79b29c3f82c57e:2, + header{ +John Doe (1 mins. ago) (inbox unread) +Subject: Hello world! +From: john.doe@example.com +Date: Sun, 16 Jun 2019 16:00:00 -0300 + header} + body{ + part{ ID: 1, Content-type: text/plain +#!/bin/sh + +echo 'Hello world!' 
+ part} + body} + message} diff --git a/tests/examplefiles/output/99_bottles_of_beer.chpl b/tests/examplefiles/output/99_bottles_of_beer.chpl index 8deeeb3..0a00fa5 100644 Binary files a/tests/examplefiles/output/99_bottles_of_beer.chpl and b/tests/examplefiles/output/99_bottles_of_beer.chpl differ diff --git a/tests/examplefiles/output/AcidStateAdvanced.hs b/tests/examplefiles/output/AcidStateAdvanced.hs index 82c613d..718f515 100644 Binary files a/tests/examplefiles/output/AcidStateAdvanced.hs and b/tests/examplefiles/output/AcidStateAdvanced.hs differ diff --git a/tests/examplefiles/output/AlternatingGroup.mu b/tests/examplefiles/output/AlternatingGroup.mu index 0dd4e11..f52b19d 100644 Binary files a/tests/examplefiles/output/AlternatingGroup.mu and b/tests/examplefiles/output/AlternatingGroup.mu differ diff --git a/tests/examplefiles/output/BOM.js b/tests/examplefiles/output/BOM.js index 241487d..ba53683 100644 Binary files a/tests/examplefiles/output/BOM.js and b/tests/examplefiles/output/BOM.js differ diff --git a/tests/examplefiles/output/Blink.ino b/tests/examplefiles/output/Blink.ino index 3cc6598..39f9aba 100644 Binary files a/tests/examplefiles/output/Blink.ino and b/tests/examplefiles/output/Blink.ino differ diff --git a/tests/examplefiles/output/CPDictionary.j b/tests/examplefiles/output/CPDictionary.j index 6247fc9..f946e82 100644 Binary files a/tests/examplefiles/output/CPDictionary.j and b/tests/examplefiles/output/CPDictionary.j differ diff --git a/tests/examplefiles/output/Charmci.ci b/tests/examplefiles/output/Charmci.ci index f822b1e..da25afc 100644 Binary files a/tests/examplefiles/output/Charmci.ci and b/tests/examplefiles/output/Charmci.ci differ diff --git a/tests/examplefiles/output/Config.in.cache b/tests/examplefiles/output/Config.in.cache index a17be09..db17942 100644 Binary files a/tests/examplefiles/output/Config.in.cache and b/tests/examplefiles/output/Config.in.cache differ diff --git a/tests/examplefiles/output/Constants.mo b/tests/examplefiles/output/Constants.mo index 80f2114..e632c6f 100644 Binary files a/tests/examplefiles/output/Constants.mo and b/tests/examplefiles/output/Constants.mo differ diff --git a/tests/examplefiles/output/DancingSudoku.lhs b/tests/examplefiles/output/DancingSudoku.lhs index 4fd7873..5c8f12e 100644 Binary files a/tests/examplefiles/output/DancingSudoku.lhs and b/tests/examplefiles/output/DancingSudoku.lhs differ diff --git a/tests/examplefiles/output/Deflate.fs b/tests/examplefiles/output/Deflate.fs index acf9a2b..0adebeb 100644 Binary files a/tests/examplefiles/output/Deflate.fs and b/tests/examplefiles/output/Deflate.fs differ diff --git a/tests/examplefiles/output/Error.pmod b/tests/examplefiles/output/Error.pmod index 5f59f90..435bed8 100644 Binary files a/tests/examplefiles/output/Error.pmod and b/tests/examplefiles/output/Error.pmod differ diff --git a/tests/examplefiles/output/Errors.scala b/tests/examplefiles/output/Errors.scala index 320cabc..847a7c4 100644 Binary files a/tests/examplefiles/output/Errors.scala and b/tests/examplefiles/output/Errors.scala differ diff --git a/tests/examplefiles/output/FakeFile.pike b/tests/examplefiles/output/FakeFile.pike index ab14b55..8ed0be5 100644 Binary files a/tests/examplefiles/output/FakeFile.pike and b/tests/examplefiles/output/FakeFile.pike differ diff --git a/tests/examplefiles/output/Get-CommandDefinitionHtml.ps1 b/tests/examplefiles/output/Get-CommandDefinitionHtml.ps1 index d682075..277c214 100644 Binary files a/tests/examplefiles/output/Get-CommandDefinitionHtml.ps1 and 
b/tests/examplefiles/output/Get-CommandDefinitionHtml.ps1 differ diff --git a/tests/examplefiles/output/IPDispatchC.nc b/tests/examplefiles/output/IPDispatchC.nc index 69f8aad..9293b0c 100644 Binary files a/tests/examplefiles/output/IPDispatchC.nc and b/tests/examplefiles/output/IPDispatchC.nc differ diff --git a/tests/examplefiles/output/IPDispatchP.nc b/tests/examplefiles/output/IPDispatchP.nc index c6bd0bf..a777073 100644 Binary files a/tests/examplefiles/output/IPDispatchP.nc and b/tests/examplefiles/output/IPDispatchP.nc differ diff --git a/tests/examplefiles/output/Intro.java b/tests/examplefiles/output/Intro.java index 37355e3..2812fe3 100644 Binary files a/tests/examplefiles/output/Intro.java and b/tests/examplefiles/output/Intro.java differ diff --git a/tests/examplefiles/output/MIME_example.eml b/tests/examplefiles/output/MIME_example.eml new file mode 100644 index 0000000..071c3db Binary files /dev/null and b/tests/examplefiles/output/MIME_example.eml differ diff --git a/tests/examplefiles/output/Makefile b/tests/examplefiles/output/Makefile index 14ff256..d6e77d3 100644 Binary files a/tests/examplefiles/output/Makefile and b/tests/examplefiles/output/Makefile differ diff --git a/tests/examplefiles/output/Object.st b/tests/examplefiles/output/Object.st index 0aa13b4..d0faa8d 100644 Binary files a/tests/examplefiles/output/Object.st and b/tests/examplefiles/output/Object.st differ diff --git a/tests/examplefiles/output/OrderedMap.hx b/tests/examplefiles/output/OrderedMap.hx index 3338f68..8902ebf 100644 Binary files a/tests/examplefiles/output/OrderedMap.hx and b/tests/examplefiles/output/OrderedMap.hx differ diff --git a/tests/examplefiles/output/RoleQ.pm6 b/tests/examplefiles/output/RoleQ.pm6 index a63b5e2..59708d2 100644 Binary files a/tests/examplefiles/output/RoleQ.pm6 and b/tests/examplefiles/output/RoleQ.pm6 differ diff --git a/tests/examplefiles/output/SmallCheck.hs b/tests/examplefiles/output/SmallCheck.hs index b2fe309..ff5778f 100644 Binary files a/tests/examplefiles/output/SmallCheck.hs and b/tests/examplefiles/output/SmallCheck.hs differ diff --git a/tests/examplefiles/output/Sorting.mod b/tests/examplefiles/output/Sorting.mod index d844378..b34d2dd 100644 Binary files a/tests/examplefiles/output/Sorting.mod and b/tests/examplefiles/output/Sorting.mod differ diff --git a/tests/examplefiles/output/StdGeneric.icl b/tests/examplefiles/output/StdGeneric.icl index aac1380..0d6dc1c 100644 Binary files a/tests/examplefiles/output/StdGeneric.icl and b/tests/examplefiles/output/StdGeneric.icl differ diff --git a/tests/examplefiles/output/Sudoku.lhs b/tests/examplefiles/output/Sudoku.lhs index a6b70e7..eb2b1ea 100644 Binary files a/tests/examplefiles/output/Sudoku.lhs and b/tests/examplefiles/output/Sudoku.lhs differ diff --git a/tests/examplefiles/output/abnf_example1.abnf b/tests/examplefiles/output/abnf_example1.abnf index b05ea25..ad27136 100644 Binary files a/tests/examplefiles/output/abnf_example1.abnf and b/tests/examplefiles/output/abnf_example1.abnf differ diff --git a/tests/examplefiles/output/abnf_example2.abnf b/tests/examplefiles/output/abnf_example2.abnf index 0087bae..9ea5be6 100644 Binary files a/tests/examplefiles/output/abnf_example2.abnf and b/tests/examplefiles/output/abnf_example2.abnf differ diff --git a/tests/examplefiles/output/addressbook.proto b/tests/examplefiles/output/addressbook.proto index 33b0071..7519843 100644 Binary files a/tests/examplefiles/output/addressbook.proto and b/tests/examplefiles/output/addressbook.proto differ diff --git 
a/tests/examplefiles/output/ahcon.f b/tests/examplefiles/output/ahcon.f
index 2ab2719..09bb186 100644
Binary files a/tests/examplefiles/output/ahcon.f and b/tests/examplefiles/output/ahcon.f differ
diff --git a/tests/examplefiles/output/all.nit b/tests/examplefiles/output/all.nit
index f6db8a4..6ba8de5 100644
Binary files a/tests/examplefiles/output/all.nit and b/tests/examplefiles/output/all.nit differ
diff --git a/tests/examplefiles/output/antlr_ANTLRv3.g b/tests/examplefiles/output/antlr_ANTLRv3.g
index b9f77f0..db544a9 100644
Binary files a/tests/examplefiles/output/antlr_ANTLRv3.g and b/tests/examplefiles/output/antlr_ANTLRv3.g differ
diff --git a/tests/examplefiles/output/antlr_throws b/tests/examplefiles/output/antlr_throws
index c849d70..a1d9bcd 100644
Binary files a/tests/examplefiles/output/antlr_throws and b/tests/examplefiles/output/antlr_throws differ
diff --git a/tests/examplefiles/output/apache2.conf b/tests/examplefiles/output/apache2.conf
index 93d4ebc..d65d245 100644
Binary files a/tests/examplefiles/output/apache2.conf and b/tests/examplefiles/output/apache2.conf differ
diff --git a/tests/examplefiles/output/as3_test.as b/tests/examplefiles/output/as3_test.as
index d66c0da..e3a653c 100644
Binary files a/tests/examplefiles/output/as3_test.as and b/tests/examplefiles/output/as3_test.as differ
diff --git a/tests/examplefiles/output/as3_test2.as b/tests/examplefiles/output/as3_test2.as
index 572c8e2..b0a17c3 100644
Binary files a/tests/examplefiles/output/as3_test2.as and b/tests/examplefiles/output/as3_test2.as differ
diff --git a/tests/examplefiles/output/as3_test3.as b/tests/examplefiles/output/as3_test3.as
index 20d26b6..a9d509d 100644
Binary files a/tests/examplefiles/output/as3_test3.as and b/tests/examplefiles/output/as3_test3.as differ
diff --git a/tests/examplefiles/output/aspx-cs_example b/tests/examplefiles/output/aspx-cs_example
index 92e1d55..5c762ba 100644
Binary files a/tests/examplefiles/output/aspx-cs_example and b/tests/examplefiles/output/aspx-cs_example differ
diff --git a/tests/examplefiles/output/autoit_submit.au3 b/tests/examplefiles/output/autoit_submit.au3
index e19515a..90f1fd1 100644
Binary files a/tests/examplefiles/output/autoit_submit.au3 and b/tests/examplefiles/output/autoit_submit.au3 differ
diff --git a/tests/examplefiles/output/automake.mk b/tests/examplefiles/output/automake.mk
index 27da9da..1a2896f 100644
Binary files a/tests/examplefiles/output/automake.mk and b/tests/examplefiles/output/automake.mk differ
diff --git a/tests/examplefiles/output/badcase.java b/tests/examplefiles/output/badcase.java
index d0d6777..ce65b5d 100644
Binary files a/tests/examplefiles/output/badcase.java and b/tests/examplefiles/output/badcase.java differ
diff --git a/tests/examplefiles/output/bigtest.nsi b/tests/examplefiles/output/bigtest.nsi
index dee3424..b168d18 100644
Binary files a/tests/examplefiles/output/bigtest.nsi and b/tests/examplefiles/output/bigtest.nsi differ
diff --git a/tests/examplefiles/output/bnf_example1.bnf b/tests/examplefiles/output/bnf_example1.bnf
index b7e3ad4..6e5fb54 100644
Binary files a/tests/examplefiles/output/bnf_example1.bnf and b/tests/examplefiles/output/bnf_example1.bnf differ
diff --git a/tests/examplefiles/output/boot-9.scm b/tests/examplefiles/output/boot-9.scm
index 8b64170..0eb7700 100644
Binary files a/tests/examplefiles/output/boot-9.scm and b/tests/examplefiles/output/boot-9.scm differ
diff --git a/tests/examplefiles/output/ca65_example b/tests/examplefiles/output/ca65_example
index fd6c230..4cb5304 100644
Binary files a/tests/examplefiles/output/ca65_example and b/tests/examplefiles/output/ca65_example differ
diff --git a/tests/examplefiles/output/capdl_example.cdl b/tests/examplefiles/output/capdl_example.cdl
index 18d5d25..ce37439 100644
Binary files a/tests/examplefiles/output/capdl_example.cdl and b/tests/examplefiles/output/capdl_example.cdl differ
diff --git a/tests/examplefiles/output/cbmbas_example b/tests/examplefiles/output/cbmbas_example
index c28016d..e6251ca 100644
Binary files a/tests/examplefiles/output/cbmbas_example and b/tests/examplefiles/output/cbmbas_example differ
diff --git a/tests/examplefiles/output/cells.ps b/tests/examplefiles/output/cells.ps
index cbe4a81..96355c8 100644
Binary files a/tests/examplefiles/output/cells.ps and b/tests/examplefiles/output/cells.ps differ
diff --git a/tests/examplefiles/output/ceval.c b/tests/examplefiles/output/ceval.c
index 06ab7b1..e126d36 100644
Binary files a/tests/examplefiles/output/ceval.c and b/tests/examplefiles/output/ceval.c differ
diff --git a/tests/examplefiles/output/char.scala b/tests/examplefiles/output/char.scala
index f014059..de022ea 100644
Binary files a/tests/examplefiles/output/char.scala and b/tests/examplefiles/output/char.scala differ
diff --git a/tests/examplefiles/output/cheetah_example.html b/tests/examplefiles/output/cheetah_example.html
index 5da8cc5..d931adc 100644
Binary files a/tests/examplefiles/output/cheetah_example.html and b/tests/examplefiles/output/cheetah_example.html differ
diff --git a/tests/examplefiles/output/classes.dylan b/tests/examplefiles/output/classes.dylan
index 8f6a1fe..3bfba1e 100644
Binary files a/tests/examplefiles/output/classes.dylan and b/tests/examplefiles/output/classes.dylan differ
diff --git a/tests/examplefiles/output/clojure-weird-keywords.clj b/tests/examplefiles/output/clojure-weird-keywords.clj
index dac39c0..95b170e 100644
Binary files a/tests/examplefiles/output/clojure-weird-keywords.clj and b/tests/examplefiles/output/clojure-weird-keywords.clj differ
diff --git a/tests/examplefiles/output/condensed_ruby.rb b/tests/examplefiles/output/condensed_ruby.rb
index a3d3b2c..5d2b170 100644
Binary files a/tests/examplefiles/output/condensed_ruby.rb and b/tests/examplefiles/output/condensed_ruby.rb differ
diff --git a/tests/examplefiles/output/coq_RelationClasses b/tests/examplefiles/output/coq_RelationClasses
index cc56a42..aee39da 100644
Binary files a/tests/examplefiles/output/coq_RelationClasses and b/tests/examplefiles/output/coq_RelationClasses differ
diff --git a/tests/examplefiles/output/core.cljs b/tests/examplefiles/output/core.cljs
index 76b8635..7a4ac56 100644
Binary files a/tests/examplefiles/output/core.cljs and b/tests/examplefiles/output/core.cljs differ
diff --git a/tests/examplefiles/output/database.pytb b/tests/examplefiles/output/database.pytb
index f89291b..903ff42 100644
Binary files a/tests/examplefiles/output/database.pytb and b/tests/examplefiles/output/database.pytb differ
diff --git a/tests/examplefiles/output/de.MoinMoin.po b/tests/examplefiles/output/de.MoinMoin.po
index 73e8688..a24be37 100644
Binary files a/tests/examplefiles/output/de.MoinMoin.po and b/tests/examplefiles/output/de.MoinMoin.po differ
diff --git a/tests/examplefiles/output/demo.ahk b/tests/examplefiles/output/demo.ahk
index 13061da..e22a733 100644
Binary files a/tests/examplefiles/output/demo.ahk and b/tests/examplefiles/output/demo.ahk differ
diff --git a/tests/examplefiles/output/demo.cfm b/tests/examplefiles/output/demo.cfm
index 7b72a46..be99886 100644
Binary files a/tests/examplefiles/output/demo.cfm and b/tests/examplefiles/output/demo.cfm differ
diff --git a/tests/examplefiles/output/demo.css.in b/tests/examplefiles/output/demo.css.in
index 706ac2c..d0bd697 100644
Binary files a/tests/examplefiles/output/demo.css.in and b/tests/examplefiles/output/demo.css.in differ
diff --git a/tests/examplefiles/output/demo.frt b/tests/examplefiles/output/demo.frt
index 10d6894..670f41a 100644
Binary files a/tests/examplefiles/output/demo.frt and b/tests/examplefiles/output/demo.frt differ
diff --git a/tests/examplefiles/output/demo.hbs b/tests/examplefiles/output/demo.hbs
index e9f20aa..e0c1eb3 100644
Binary files a/tests/examplefiles/output/demo.hbs and b/tests/examplefiles/output/demo.hbs differ
diff --git a/tests/examplefiles/output/demo.js.in b/tests/examplefiles/output/demo.js.in
index a5fd72e..9f49521 100644
Binary files a/tests/examplefiles/output/demo.js.in and b/tests/examplefiles/output/demo.js.in differ
diff --git a/tests/examplefiles/output/demo.thrift b/tests/examplefiles/output/demo.thrift
index fbb79dc..73b9316 100644
Binary files a/tests/examplefiles/output/demo.thrift and b/tests/examplefiles/output/demo.thrift differ
diff --git a/tests/examplefiles/output/demo.xul.in b/tests/examplefiles/output/demo.xul.in
index c6b7fb8..4a7349c 100644
Binary files a/tests/examplefiles/output/demo.xul.in and b/tests/examplefiles/output/demo.xul.in differ
diff --git a/tests/examplefiles/output/django_sample.html+django b/tests/examplefiles/output/django_sample.html+django
index 4a9e360..86b4615 100644
Binary files a/tests/examplefiles/output/django_sample.html+django and b/tests/examplefiles/output/django_sample.html+django differ
diff --git a/tests/examplefiles/output/docker.docker b/tests/examplefiles/output/docker.docker
index 69428d8..abead67 100644
Binary files a/tests/examplefiles/output/docker.docker and b/tests/examplefiles/output/docker.docker differ
diff --git a/tests/examplefiles/output/durexmania.aheui b/tests/examplefiles/output/durexmania.aheui
index d29fb58..f6319ae 100644
Binary files a/tests/examplefiles/output/durexmania.aheui and b/tests/examplefiles/output/durexmania.aheui differ
diff --git a/tests/examplefiles/output/dwarf.cw b/tests/examplefiles/output/dwarf.cw
index ca36a9f..7d368d1 100644
Binary files a/tests/examplefiles/output/dwarf.cw and b/tests/examplefiles/output/dwarf.cw differ
diff --git a/tests/examplefiles/output/eg_example1.eg b/tests/examplefiles/output/eg_example1.eg
index c018645..2d7f9f0 100644
Binary files a/tests/examplefiles/output/eg_example1.eg and b/tests/examplefiles/output/eg_example1.eg differ
diff --git a/tests/examplefiles/output/ember.handlebars b/tests/examplefiles/output/ember.handlebars
index 97c13e0..7acd5cb 100644
Binary files a/tests/examplefiles/output/ember.handlebars and b/tests/examplefiles/output/ember.handlebars differ
diff --git a/tests/examplefiles/output/erl_session b/tests/examplefiles/output/erl_session
index b227c19..fe3e1bd 100644
Binary files a/tests/examplefiles/output/erl_session and b/tests/examplefiles/output/erl_session differ
diff --git a/tests/examplefiles/output/es6.js b/tests/examplefiles/output/es6.js
index a8a103d..08420f2 100644
Binary files a/tests/examplefiles/output/es6.js and b/tests/examplefiles/output/es6.js differ
diff --git a/tests/examplefiles/output/escape_semicolon.clj b/tests/examplefiles/output/escape_semicolon.clj
index 1196f90..9ba518a 100644
Binary files a/tests/examplefiles/output/escape_semicolon.clj and b/tests/examplefiles/output/escape_semicolon.clj differ
diff --git a/tests/examplefiles/output/eval.rs b/tests/examplefiles/output/eval.rs
index cb82763..bc8b2bd 100644
Binary files a/tests/examplefiles/output/eval.rs and b/tests/examplefiles/output/eval.rs differ
diff --git a/tests/examplefiles/output/evil_regex.js b/tests/examplefiles/output/evil_regex.js
index e8e3f05..9e71ea1 100644
Binary files a/tests/examplefiles/output/evil_regex.js and b/tests/examplefiles/output/evil_regex.js differ
diff --git a/tests/examplefiles/output/example.Rd b/tests/examplefiles/output/example.Rd
index 4145e6e..fd99a04 100644
Binary files a/tests/examplefiles/output/example.Rd and b/tests/examplefiles/output/example.Rd differ
diff --git a/tests/examplefiles/output/example.als b/tests/examplefiles/output/example.als
index fd90915..867e945 100644
Binary files a/tests/examplefiles/output/example.als and b/tests/examplefiles/output/example.als differ
diff --git a/tests/examplefiles/output/example.bat b/tests/examplefiles/output/example.bat
index 38e2d39..0a70c95 100644
Binary files a/tests/examplefiles/output/example.bat and b/tests/examplefiles/output/example.bat differ
diff --git a/tests/examplefiles/output/example.bbc b/tests/examplefiles/output/example.bbc
index fb48ae2..b9d37f7 100644
Binary files a/tests/examplefiles/output/example.bbc and b/tests/examplefiles/output/example.bbc differ
diff --git a/tests/examplefiles/output/example.bc b/tests/examplefiles/output/example.bc
index 7e8a71a..955e118 100644
Binary files a/tests/examplefiles/output/example.bc and b/tests/examplefiles/output/example.bc differ
diff --git a/tests/examplefiles/output/example.boa b/tests/examplefiles/output/example.boa
index ba5469c..a2fdd9f 100644
Binary files a/tests/examplefiles/output/example.boa and b/tests/examplefiles/output/example.boa differ
diff --git a/tests/examplefiles/output/example.bug b/tests/examplefiles/output/example.bug
index 70e2aa2..85ae01a 100644
Binary files a/tests/examplefiles/output/example.bug and b/tests/examplefiles/output/example.bug differ
diff --git a/tests/examplefiles/output/example.c b/tests/examplefiles/output/example.c
index ff9a9ec..d1db6df 100644
Binary files a/tests/examplefiles/output/example.c and b/tests/examplefiles/output/example.c differ
diff --git a/tests/examplefiles/output/example.ceylon b/tests/examplefiles/output/example.ceylon
index 70e8afd..96f8b66 100644
Binary files a/tests/examplefiles/output/example.ceylon and b/tests/examplefiles/output/example.ceylon differ
diff --git a/tests/examplefiles/output/example.chai b/tests/examplefiles/output/example.chai
index c91660e..cb355fc 100644
Binary files a/tests/examplefiles/output/example.chai and b/tests/examplefiles/output/example.chai differ
diff --git a/tests/examplefiles/output/example.clay b/tests/examplefiles/output/example.clay
index bfc9a39..924c751 100644
Binary files a/tests/examplefiles/output/example.clay and b/tests/examplefiles/output/example.clay differ
diff --git a/tests/examplefiles/output/example.cls b/tests/examplefiles/output/example.cls
index daf6282..7cf0af7 100644
Binary files a/tests/examplefiles/output/example.cls and b/tests/examplefiles/output/example.cls differ
diff --git a/tests/examplefiles/output/example.cob b/tests/examplefiles/output/example.cob
index 22cee26..b6fcdd8 100644
Binary files a/tests/examplefiles/output/example.cob and b/tests/examplefiles/output/example.cob differ
diff --git a/tests/examplefiles/output/example.coffee b/tests/examplefiles/output/example.coffee
index 3522d59..05290fb 100644
Binary files a/tests/examplefiles/output/example.coffee and b/tests/examplefiles/output/example.coffee differ
diff --git a/tests/examplefiles/output/example.cpp b/tests/examplefiles/output/example.cpp
index e4d0e79..9967b29 100644
Binary files a/tests/examplefiles/output/example.cpp and b/tests/examplefiles/output/example.cpp differ
diff --git a/tests/examplefiles/output/example.e b/tests/examplefiles/output/example.e
index 497bf95..578b9e9 100644
Binary files a/tests/examplefiles/output/example.e and b/tests/examplefiles/output/example.e differ
diff --git a/tests/examplefiles/output/example.elm b/tests/examplefiles/output/example.elm
index 9cc0fea..a893369 100644
Binary files a/tests/examplefiles/output/example.elm and b/tests/examplefiles/output/example.elm differ
diff --git a/tests/examplefiles/output/example.eml b/tests/examplefiles/output/example.eml
new file mode 100644
index 0000000..7192256
Binary files /dev/null and b/tests/examplefiles/output/example.eml differ
diff --git a/tests/examplefiles/output/example.ezt b/tests/examplefiles/output/example.ezt
index 0fccb5a..32f1a11 100644
Binary files a/tests/examplefiles/output/example.ezt and b/tests/examplefiles/output/example.ezt differ
diff --git a/tests/examplefiles/output/example.f90 b/tests/examplefiles/output/example.f90
index 70ccbd5..962f533 100644
Binary files a/tests/examplefiles/output/example.f90 and b/tests/examplefiles/output/example.f90 differ
diff --git a/tests/examplefiles/output/example.feature b/tests/examplefiles/output/example.feature
index 0ece0b6..efa791c 100644
Binary files a/tests/examplefiles/output/example.feature and b/tests/examplefiles/output/example.feature differ
diff --git a/tests/examplefiles/output/example.fish b/tests/examplefiles/output/example.fish
index 7974b14..de1c31c 100644
Binary files a/tests/examplefiles/output/example.fish and b/tests/examplefiles/output/example.fish differ
diff --git a/tests/examplefiles/output/example.flo b/tests/examplefiles/output/example.flo
index 2f6a973..24fd634 100644
Binary files a/tests/examplefiles/output/example.flo and b/tests/examplefiles/output/example.flo differ
diff --git a/tests/examplefiles/output/example.gd b/tests/examplefiles/output/example.gd
index 0e8a651..f9138aa 100644
Binary files a/tests/examplefiles/output/example.gd and b/tests/examplefiles/output/example.gd differ
diff --git a/tests/examplefiles/output/example.gi b/tests/examplefiles/output/example.gi
index 86e92fe..67cbb53 100644
Binary files a/tests/examplefiles/output/example.gi and b/tests/examplefiles/output/example.gi differ
diff --git a/tests/examplefiles/output/example.golo b/tests/examplefiles/output/example.golo
index 4f9d0e0..c50100e 100644
Binary files a/tests/examplefiles/output/example.golo and b/tests/examplefiles/output/example.golo differ
diff --git a/tests/examplefiles/output/example.groovy b/tests/examplefiles/output/example.groovy
index f1eb275..d1f11ce 100644
Binary files a/tests/examplefiles/output/example.groovy and b/tests/examplefiles/output/example.groovy differ
diff --git a/tests/examplefiles/output/example.gs b/tests/examplefiles/output/example.gs
index 4997b23..88fd1d8 100644
Binary files a/tests/examplefiles/output/example.gs and b/tests/examplefiles/output/example.gs differ
diff --git a/tests/examplefiles/output/example.gst b/tests/examplefiles/output/example.gst
index b23d60b..81729cc 100644
Binary files a/tests/examplefiles/output/example.gst and b/tests/examplefiles/output/example.gst differ
diff --git a/tests/examplefiles/output/example.hlsl b/tests/examplefiles/output/example.hlsl
index 5d5bb6a..67225f8 100644
Binary files a/tests/examplefiles/output/example.hlsl and b/tests/examplefiles/output/example.hlsl differ
diff --git a/tests/examplefiles/output/example.hs b/tests/examplefiles/output/example.hs
index aba5835..f77f6be 100644
Binary files a/tests/examplefiles/output/example.hs and b/tests/examplefiles/output/example.hs differ
diff --git a/tests/examplefiles/output/example.hx b/tests/examplefiles/output/example.hx
index 2426286..766e877 100644
Binary files a/tests/examplefiles/output/example.hx and b/tests/examplefiles/output/example.hx differ
diff --git a/tests/examplefiles/output/example.i6t b/tests/examplefiles/output/example.i6t
index e0647ae..0e31a7d 100644
Binary files a/tests/examplefiles/output/example.i6t and b/tests/examplefiles/output/example.i6t differ
diff --git a/tests/examplefiles/output/example.i7x b/tests/examplefiles/output/example.i7x
index bd75664..1f5e195 100644
Binary files a/tests/examplefiles/output/example.i7x and b/tests/examplefiles/output/example.i7x differ
diff --git a/tests/examplefiles/output/example.icn b/tests/examplefiles/output/example.icn
index 1f8f4cf..9b8854e 100644
Binary files a/tests/examplefiles/output/example.icn and b/tests/examplefiles/output/example.icn differ
diff --git a/tests/examplefiles/output/example.icon b/tests/examplefiles/output/example.icon
index d2b240f..84f58ed 100644
Binary files a/tests/examplefiles/output/example.icon and b/tests/examplefiles/output/example.icon differ
diff --git a/tests/examplefiles/output/example.j b/tests/examplefiles/output/example.j
index c2db467..73b6841 100644
Binary files a/tests/examplefiles/output/example.j and b/tests/examplefiles/output/example.j differ
diff --git a/tests/examplefiles/output/example.jag b/tests/examplefiles/output/example.jag
index 07ad4e5..05d8489 100644
Binary files a/tests/examplefiles/output/example.jag and b/tests/examplefiles/output/example.jag differ
diff --git a/tests/examplefiles/output/example.java b/tests/examplefiles/output/example.java
index 2971ef3..5a0fa02 100644
Binary files a/tests/examplefiles/output/example.java and b/tests/examplefiles/output/example.java differ
diff --git a/tests/examplefiles/output/example.jcl b/tests/examplefiles/output/example.jcl
index f7fce9e..db26c96 100644
Binary files a/tests/examplefiles/output/example.jcl and b/tests/examplefiles/output/example.jcl differ
diff --git a/tests/examplefiles/output/example.jsgf b/tests/examplefiles/output/example.jsgf
index 7fb69b2..0790842 100644
Binary files a/tests/examplefiles/output/example.jsgf and b/tests/examplefiles/output/example.jsgf differ
diff --git a/tests/examplefiles/output/example.jsonld b/tests/examplefiles/output/example.jsonld
index 1f213e4..1959c9c 100644
Binary files a/tests/examplefiles/output/example.jsonld and b/tests/examplefiles/output/example.jsonld differ
diff --git a/tests/examplefiles/output/example.juttle b/tests/examplefiles/output/example.juttle
index b6aed68..d98b1c7 100644
Binary files a/tests/examplefiles/output/example.juttle and b/tests/examplefiles/output/example.juttle differ
diff --git a/tests/examplefiles/output/example.kal b/tests/examplefiles/output/example.kal
index b5d8977..478c78d 100644
Binary files a/tests/examplefiles/output/example.kal and b/tests/examplefiles/output/example.kal differ
diff --git a/tests/examplefiles/output/example.kt b/tests/examplefiles/output/example.kt
index 6971411..a76633e 100644
Binary files a/tests/examplefiles/output/example.kt and b/tests/examplefiles/output/example.kt differ
diff --git a/tests/examplefiles/output/example.lagda b/tests/examplefiles/output/example.lagda
index 7f104f4..b7731d4 100644
Binary files a/tests/examplefiles/output/example.lagda and b/tests/examplefiles/output/example.lagda differ
diff --git a/tests/examplefiles/output/example.liquid b/tests/examplefiles/output/example.liquid
index 968cdb0..587a76b 100644
Binary files a/tests/examplefiles/output/example.liquid and b/tests/examplefiles/output/example.liquid differ
diff --git a/tests/examplefiles/output/example.lua b/tests/examplefiles/output/example.lua
index ab2eed8..9eb1e74 100644
Binary files a/tests/examplefiles/output/example.lua and b/tests/examplefiles/output/example.lua differ
diff --git a/tests/examplefiles/output/example.ma b/tests/examplefiles/output/example.ma
index 1a30f51..f136137 100644
Binary files a/tests/examplefiles/output/example.ma and b/tests/examplefiles/output/example.ma differ
diff --git a/tests/examplefiles/output/example.mac b/tests/examplefiles/output/example.mac
index 5c269a1..dad207a 100644
Binary files a/tests/examplefiles/output/example.mac and b/tests/examplefiles/output/example.mac differ
diff --git a/tests/examplefiles/output/example.md b/tests/examplefiles/output/example.md
index fdd1419..970de2f 100644
Binary files a/tests/examplefiles/output/example.md and b/tests/examplefiles/output/example.md differ
diff --git a/tests/examplefiles/output/example.monkey b/tests/examplefiles/output/example.monkey
index 54a7f83..e6e8a81 100644
Binary files a/tests/examplefiles/output/example.monkey and b/tests/examplefiles/output/example.monkey differ
diff --git a/tests/examplefiles/output/example.moo b/tests/examplefiles/output/example.moo
index 2d454f8..9bf2470 100644
Binary files a/tests/examplefiles/output/example.moo and b/tests/examplefiles/output/example.moo differ
diff --git a/tests/examplefiles/output/example.moon b/tests/examplefiles/output/example.moon
index 1aaf693..cfbb482 100644
Binary files a/tests/examplefiles/output/example.moon and b/tests/examplefiles/output/example.moon differ
diff --git a/tests/examplefiles/output/example.mq4 b/tests/examplefiles/output/example.mq4
index 915edf0..f61d205 100644
Binary files a/tests/examplefiles/output/example.mq4 and b/tests/examplefiles/output/example.mq4 differ
diff --git a/tests/examplefiles/output/example.mqh b/tests/examplefiles/output/example.mqh
index d074816..2e22d5e 100644
Binary files a/tests/examplefiles/output/example.mqh and b/tests/examplefiles/output/example.mqh differ
diff --git a/tests/examplefiles/output/example.msc b/tests/examplefiles/output/example.msc
index ad33edc..a313159 100644
Binary files a/tests/examplefiles/output/example.msc and b/tests/examplefiles/output/example.msc differ
diff --git a/tests/examplefiles/output/example.ng2 b/tests/examplefiles/output/example.ng2
index 19ac1a6..19fce3d 100644
Binary files a/tests/examplefiles/output/example.ng2 and b/tests/examplefiles/output/example.ng2 differ
diff --git a/tests/examplefiles/output/example.ni b/tests/examplefiles/output/example.ni
index 367a6b6..4fedaae 100644
Binary files a/tests/examplefiles/output/example.ni and b/tests/examplefiles/output/example.ni differ
diff --git a/tests/examplefiles/output/example.nim b/tests/examplefiles/output/example.nim
index a07e308..26ab67d 100644
Binary files a/tests/examplefiles/output/example.nim and b/tests/examplefiles/output/example.nim differ
diff --git a/tests/examplefiles/output/example.nix b/tests/examplefiles/output/example.nix
index 0f13ac1..7597072 100644
Binary files a/tests/examplefiles/output/example.nix and b/tests/examplefiles/output/example.nix differ
diff --git a/tests/examplefiles/output/example.ns2 b/tests/examplefiles/output/example.ns2
index 5249576..cd6cc92 100644
Binary files a/tests/examplefiles/output/example.ns2 and b/tests/examplefiles/output/example.ns2 differ
diff --git a/tests/examplefiles/output/example.pas b/tests/examplefiles/output/example.pas
index 0701b12..e1cba90 100644
Binary files a/tests/examplefiles/output/example.pas and b/tests/examplefiles/output/example.pas differ
diff --git a/tests/examplefiles/output/example.pcmk b/tests/examplefiles/output/example.pcmk
index ebfa861..3742ede 100644
Binary files a/tests/examplefiles/output/example.pcmk and b/tests/examplefiles/output/example.pcmk differ
diff --git a/tests/examplefiles/output/example.pony b/tests/examplefiles/output/example.pony
index 02658b9..214e6a6 100644
Binary files a/tests/examplefiles/output/example.pony and b/tests/examplefiles/output/example.pony differ
diff --git a/tests/examplefiles/output/example.pp b/tests/examplefiles/output/example.pp
index 8cd2b14..0240000 100644
Binary files a/tests/examplefiles/output/example.pp and b/tests/examplefiles/output/example.pp differ
diff --git a/tests/examplefiles/output/example.praat b/tests/examplefiles/output/example.praat
index 172f3bd..c6187c3 100644
Binary files a/tests/examplefiles/output/example.praat and b/tests/examplefiles/output/example.praat differ
diff --git a/tests/examplefiles/output/example.prg b/tests/examplefiles/output/example.prg
index 5a842c4..af89b92 100644
Binary files a/tests/examplefiles/output/example.prg and b/tests/examplefiles/output/example.prg differ
diff --git a/tests/examplefiles/output/example.rb b/tests/examplefiles/output/example.rb
index debd9ac..fb18af1 100644
Binary files a/tests/examplefiles/output/example.rb and b/tests/examplefiles/output/example.rb differ
diff --git a/tests/examplefiles/output/example.red b/tests/examplefiles/output/example.red
index 3e4758c..27c9994 100644
Binary files a/tests/examplefiles/output/example.red and b/tests/examplefiles/output/example.red differ
diff --git a/tests/examplefiles/output/example.reds b/tests/examplefiles/output/example.reds
index f7209d6..3e81a0a 100644
Binary files a/tests/examplefiles/output/example.reds and b/tests/examplefiles/output/example.reds differ
diff --git a/tests/examplefiles/output/example.reg b/tests/examplefiles/output/example.reg
index 11538b2..679b559 100644
Binary files a/tests/examplefiles/output/example.reg and b/tests/examplefiles/output/example.reg differ
diff --git a/tests/examplefiles/output/example.rexx b/tests/examplefiles/output/example.rexx
index a8737d8..4770fe3 100644
Binary files a/tests/examplefiles/output/example.rexx and b/tests/examplefiles/output/example.rexx differ
diff --git a/tests/examplefiles/output/example.rhtml b/tests/examplefiles/output/example.rhtml
index aacee11..e990a45 100644
Binary files a/tests/examplefiles/output/example.rhtml and b/tests/examplefiles/output/example.rhtml differ
diff --git a/tests/examplefiles/output/example.rkt b/tests/examplefiles/output/example.rkt
index 26f5253..fd8c984 100644
Binary files a/tests/examplefiles/output/example.rkt and b/tests/examplefiles/output/example.rkt differ
diff --git a/tests/examplefiles/output/example.rpf b/tests/examplefiles/output/example.rpf
index 22d06b1..672fcb6 100644
Binary files a/tests/examplefiles/output/example.rpf and b/tests/examplefiles/output/example.rpf differ
diff --git a/tests/examplefiles/output/example.rts b/tests/examplefiles/output/example.rts
index 37f2649..ba78ed2 100644
Binary files a/tests/examplefiles/output/example.rts and b/tests/examplefiles/output/example.rts differ
diff --git a/tests/examplefiles/output/example.sbl b/tests/examplefiles/output/example.sbl
index 004c4bb..5c950be 100644
Binary files a/tests/examplefiles/output/example.sbl and b/tests/examplefiles/output/example.sbl differ
diff --git a/tests/examplefiles/output/example.scd b/tests/examplefiles/output/example.scd
index 0362c70..8e6c17d 100644
Binary files a/tests/examplefiles/output/example.scd and b/tests/examplefiles/output/example.scd differ
diff --git a/tests/examplefiles/output/example.sgf b/tests/examplefiles/output/example.sgf
index 2ca3282..9e5cdae 100644
Binary files a/tests/examplefiles/output/example.sgf and b/tests/examplefiles/output/example.sgf differ
diff --git a/tests/examplefiles/output/example.sh b/tests/examplefiles/output/example.sh
index 489bdac..9093c63 100644
Binary files a/tests/examplefiles/output/example.sh and b/tests/examplefiles/output/example.sh differ
diff --git a/tests/examplefiles/output/example.sh-session b/tests/examplefiles/output/example.sh-session
index a8146b0..cebcbfd 100644
Binary files a/tests/examplefiles/output/example.sh-session and b/tests/examplefiles/output/example.sh-session differ
diff --git a/tests/examplefiles/output/example.shell-session b/tests/examplefiles/output/example.shell-session
index 55fc4bd..93953b3 100644
Binary files a/tests/examplefiles/output/example.shell-session and b/tests/examplefiles/output/example.shell-session differ
diff --git a/tests/examplefiles/output/example.shex b/tests/examplefiles/output/example.shex
new file mode 100644
index 0000000..3506d28
Binary files /dev/null and b/tests/examplefiles/output/example.shex differ
diff --git a/tests/examplefiles/output/example.sl b/tests/examplefiles/output/example.sl
index 0e3e804..c2e0b05 100644
Binary files a/tests/examplefiles/output/example.sl and b/tests/examplefiles/output/example.sl differ
diff --git a/tests/examplefiles/output/example.slim b/tests/examplefiles/output/example.slim
index 7751e28..f5ffd4c 100644
Binary files a/tests/examplefiles/output/example.slim and b/tests/examplefiles/output/example.slim differ
diff --git a/tests/examplefiles/output/example.sls b/tests/examplefiles/output/example.sls
index 0d3aa3a..0f4ceb6 100644
Binary files a/tests/examplefiles/output/example.sls and b/tests/examplefiles/output/example.sls differ
diff --git a/tests/examplefiles/output/example.sml b/tests/examplefiles/output/example.sml
index 87a82c4..bdffd00 100644
Binary files a/tests/examplefiles/output/example.sml and b/tests/examplefiles/output/example.sml differ
diff --git a/tests/examplefiles/output/example.snobol b/tests/examplefiles/output/example.snobol
index f5d06de..39aecef 100644
Binary files a/tests/examplefiles/output/example.snobol and b/tests/examplefiles/output/example.snobol differ
diff --git a/tests/examplefiles/output/example.stan b/tests/examplefiles/output/example.stan
index f05262a..33c532b 100644
Binary files a/tests/examplefiles/output/example.stan and b/tests/examplefiles/output/example.stan differ
diff --git a/tests/examplefiles/output/example.tap b/tests/examplefiles/output/example.tap
index 73e6e9c..e4e229a 100644
Binary files a/tests/examplefiles/output/example.tap and b/tests/examplefiles/output/example.tap differ
diff --git a/tests/examplefiles/output/example.tasm b/tests/examplefiles/output/example.tasm
index ee655db..6c45fad 100644
Binary files a/tests/examplefiles/output/example.tasm and b/tests/examplefiles/output/example.tasm differ
diff --git a/tests/examplefiles/output/example.tea b/tests/examplefiles/output/example.tea
index 70f3d03..2d05d65 100644
Binary files a/tests/examplefiles/output/example.tea and b/tests/examplefiles/output/example.tea differ
diff --git a/tests/examplefiles/output/example.tf b/tests/examplefiles/output/example.tf
index bc0c532..712811e 100644
Binary files a/tests/examplefiles/output/example.tf and b/tests/examplefiles/output/example.tf differ
diff --git a/tests/examplefiles/output/example.thy b/tests/examplefiles/output/example.thy
index 1aa969c..ee42f55 100644
Binary files a/tests/examplefiles/output/example.thy and b/tests/examplefiles/output/example.thy differ
diff --git a/tests/examplefiles/output/example.todotxt b/tests/examplefiles/output/example.todotxt
index 42865cd..11adda9 100644
Binary files a/tests/examplefiles/output/example.todotxt and b/tests/examplefiles/output/example.todotxt differ
diff --git a/tests/examplefiles/output/example.toml b/tests/examplefiles/output/example.toml
index 1b46958..4a443e3 100644
Binary files a/tests/examplefiles/output/example.toml and b/tests/examplefiles/output/example.toml differ
diff --git a/tests/examplefiles/output/example.ttl b/tests/examplefiles/output/example.ttl
index eb209e3..df5cd2b 100644
Binary files a/tests/examplefiles/output/example.ttl and b/tests/examplefiles/output/example.ttl differ
diff --git a/tests/examplefiles/output/example.u b/tests/examplefiles/output/example.u
index 5436718..6c08520 100644
Binary files a/tests/examplefiles/output/example.u and b/tests/examplefiles/output/example.u differ
diff --git a/tests/examplefiles/output/example.u1 b/tests/examplefiles/output/example.u1
index 6129506..19c108b 100644
Binary files a/tests/examplefiles/output/example.u1 and b/tests/examplefiles/output/example.u1 differ
diff --git a/tests/examplefiles/output/example.vbs b/tests/examplefiles/output/example.vbs
index c895f4e..bf02ffc 100644
Binary files a/tests/examplefiles/output/example.vbs and b/tests/examplefiles/output/example.vbs differ
diff --git a/tests/examplefiles/output/example.weechatlog b/tests/examplefiles/output/example.weechatlog
index a43963e..64988f8 100644
Binary files a/tests/examplefiles/output/example.weechatlog and b/tests/examplefiles/output/example.weechatlog differ
diff --git a/tests/examplefiles/output/example.whiley b/tests/examplefiles/output/example.whiley
index 30c9185..e9b7c01 100644
Binary files a/tests/examplefiles/output/example.whiley and b/tests/examplefiles/output/example.whiley differ
diff --git a/tests/examplefiles/output/example.x10 b/tests/examplefiles/output/example.x10
index 14d9672..8dc9253 100644
Binary files a/tests/examplefiles/output/example.x10 and b/tests/examplefiles/output/example.x10 differ
diff --git a/tests/examplefiles/output/example.xhtml b/tests/examplefiles/output/example.xhtml
index df40f43..540c5b1 100644
Binary files a/tests/examplefiles/output/example.xhtml and b/tests/examplefiles/output/example.xhtml differ
diff --git a/tests/examplefiles/output/example.xtend b/tests/examplefiles/output/example.xtend
index 23fa72b..b67d256 100644
Binary files a/tests/examplefiles/output/example.xtend and b/tests/examplefiles/output/example.xtend differ
diff --git a/tests/examplefiles/output/example.xtm b/tests/examplefiles/output/example.xtm
index 145610c..16db560 100644
Binary files a/tests/examplefiles/output/example.xtm and b/tests/examplefiles/output/example.xtm differ
diff --git a/tests/examplefiles/output/example.yaml b/tests/examplefiles/output/example.yaml
index 7c2d1d0..05b8915 100644
Binary files a/tests/examplefiles/output/example.yaml and b/tests/examplefiles/output/example.yaml differ
diff --git a/tests/examplefiles/output/example.zig b/tests/examplefiles/output/example.zig
new file mode 100644
index 0000000..9f22410
Binary files /dev/null and b/tests/examplefiles/output/example.zig differ
diff --git a/tests/examplefiles/output/example1.cadl b/tests/examplefiles/output/example1.cadl
index 652181d..61f8e1e 100644
Binary files a/tests/examplefiles/output/example1.cadl and b/tests/examplefiles/output/example1.cadl differ
diff --git a/tests/examplefiles/output/example2.aspx b/tests/examplefiles/output/example2.aspx
index 135c1ab..21b988e 100644
Binary files a/tests/examplefiles/output/example2.aspx and b/tests/examplefiles/output/example2.aspx differ
diff --git a/tests/examplefiles/output/example2.cpp b/tests/examplefiles/output/example2.cpp
index 79d042a..196bd32 100644
Binary files a/tests/examplefiles/output/example2.cpp and b/tests/examplefiles/output/example2.cpp differ
diff --git a/tests/examplefiles/output/example2.msc b/tests/examplefiles/output/example2.msc
index bb55e2f..36dd70c 100644
Binary files a/tests/examplefiles/output/example2.msc and b/tests/examplefiles/output/example2.msc differ
diff --git a/tests/examplefiles/output/exampleScript.cfc b/tests/examplefiles/output/exampleScript.cfc
index ddaa98d..27beb20 100644
Binary files a/tests/examplefiles/output/exampleScript.cfc and b/tests/examplefiles/output/exampleScript.cfc differ
diff --git a/tests/examplefiles/output/exampleTag.cfc b/tests/examplefiles/output/exampleTag.cfc
index 4e0f9e9..2e0c664 100644
Binary files a/tests/examplefiles/output/exampleTag.cfc and b/tests/examplefiles/output/exampleTag.cfc differ
diff --git a/tests/examplefiles/output/example_coq.v b/tests/examplefiles/output/example_coq.v
index bf38435..62f0e12 100644
Binary files a/tests/examplefiles/output/example_coq.v and b/tests/examplefiles/output/example_coq.v differ
diff --git a/tests/examplefiles/output/example_elixir.ex b/tests/examplefiles/output/example_elixir.ex
index 23dd791..c9ba1c5 100644
Binary files a/tests/examplefiles/output/example_elixir.ex and b/tests/examplefiles/output/example_elixir.ex differ
diff --git a/tests/examplefiles/output/example_file.fy b/tests/examplefiles/output/example_file.fy
index a2a825f..65cc0b3 100644
Binary files a/tests/examplefiles/output/example_file.fy and b/tests/examplefiles/output/example_file.fy differ
diff --git a/tests/examplefiles/output/ezhil_primefactors.n b/tests/examplefiles/output/ezhil_primefactors.n
index 27d1ff1..e1303ad 100644
Binary files a/tests/examplefiles/output/ezhil_primefactors.n and b/tests/examplefiles/output/ezhil_primefactors.n differ
diff --git a/tests/examplefiles/output/fennelview.fnl b/tests/examplefiles/output/fennelview.fnl
index 2f9b5e5..766db17 100644
Binary files a/tests/examplefiles/output/fennelview.fnl and b/tests/examplefiles/output/fennelview.fnl differ
diff --git a/tests/examplefiles/output/fibonacci.tokigun.aheui b/tests/examplefiles/output/fibonacci.tokigun.aheui
index 9ca1585..96a1d68 100644
Binary files a/tests/examplefiles/output/fibonacci.tokigun.aheui and b/tests/examplefiles/output/fibonacci.tokigun.aheui differ
diff --git a/tests/examplefiles/output/firefox.mak b/tests/examplefiles/output/firefox.mak
index e8f9241..3072199 100644
Binary files a/tests/examplefiles/output/firefox.mak and b/tests/examplefiles/output/firefox.mak differ
diff --git a/tests/examplefiles/output/flatline_example b/tests/examplefiles/output/flatline_example
index 9821ef7..ac38064 100644
Binary files a/tests/examplefiles/output/flatline_example and b/tests/examplefiles/output/flatline_example differ
diff --git a/tests/examplefiles/output/flipflop.sv b/tests/examplefiles/output/flipflop.sv
index d3de72c..35a8d6d 100644
Binary files a/tests/examplefiles/output/flipflop.sv and b/tests/examplefiles/output/flipflop.sv differ
diff --git a/tests/examplefiles/output/foo.sce b/tests/examplefiles/output/foo.sce
index ab2ce68..48dfcbf 100644
Binary files a/tests/examplefiles/output/foo.sce and b/tests/examplefiles/output/foo.sce differ
diff --git a/tests/examplefiles/output/format.ml b/tests/examplefiles/output/format.ml
index 54a9ef2..bcbb0a6 100644
Binary files a/tests/examplefiles/output/format.ml and b/tests/examplefiles/output/format.ml differ
diff --git a/tests/examplefiles/output/freefem.edp b/tests/examplefiles/output/freefem.edp
index 381ae1c..8a3cced 100644
Binary files a/tests/examplefiles/output/freefem.edp and b/tests/examplefiles/output/freefem.edp differ
diff --git a/tests/examplefiles/output/fucked_up.rb b/tests/examplefiles/output/fucked_up.rb
index 0081951..724649e 100644
Binary files a/tests/examplefiles/output/fucked_up.rb and b/tests/examplefiles/output/fucked_up.rb differ
diff --git a/tests/examplefiles/output/function.mu b/tests/examplefiles/output/function.mu
index 022332c..cf66b9f 100644
Binary files a/tests/examplefiles/output/function.mu and b/tests/examplefiles/output/function.mu differ
diff --git a/tests/examplefiles/output/functional.rst b/tests/examplefiles/output/functional.rst
index ab73490..352839d 100644
Binary files a/tests/examplefiles/output/functional.rst and b/tests/examplefiles/output/functional.rst differ
diff --git a/tests/examplefiles/output/garcia-wachs.kk b/tests/examplefiles/output/garcia-wachs.kk
index 0d1f45b..00d08b4 100644
Binary files a/tests/examplefiles/output/garcia-wachs.kk and b/tests/examplefiles/output/garcia-wachs.kk differ
diff --git a/tests/examplefiles/output/genclass.clj b/tests/examplefiles/output/genclass.clj
index 79eb6ae..b09b617 100644
Binary files a/tests/examplefiles/output/genclass.clj and b/tests/examplefiles/output/genclass.clj differ
diff --git a/tests/examplefiles/output/genshi_example.xml+genshi b/tests/examplefiles/output/genshi_example.xml+genshi
index 58e655c..b4d1d27 100644
Binary files a/tests/examplefiles/output/genshi_example.xml+genshi and b/tests/examplefiles/output/genshi_example.xml+genshi differ
diff --git a/tests/examplefiles/output/genshitext_example.genshitext b/tests/examplefiles/output/genshitext_example.genshitext
index e5a91a0..3758629 100644
Binary files a/tests/examplefiles/output/genshitext_example.genshitext and b/tests/examplefiles/output/genshitext_example.genshitext differ
diff --git a/tests/examplefiles/output/glsl.frag b/tests/examplefiles/output/glsl.frag
index 2410ae3..7a324f6 100644
Binary files a/tests/examplefiles/output/glsl.frag and b/tests/examplefiles/output/glsl.frag differ
diff --git a/tests/examplefiles/output/glsl.vert b/tests/examplefiles/output/glsl.vert
index c219717..e1cbe33 100644
Binary files a/tests/examplefiles/output/glsl.vert and b/tests/examplefiles/output/glsl.vert differ
diff --git a/tests/examplefiles/output/grammar-test.p6 b/tests/examplefiles/output/grammar-test.p6
index 394cca6..6b9ea43 100644
Binary files a/tests/examplefiles/output/grammar-test.p6 and b/tests/examplefiles/output/grammar-test.p6 differ
diff --git a/tests/examplefiles/output/guidance.smv b/tests/examplefiles/output/guidance.smv
index bc035e5..80f115d 100644
Binary files a/tests/examplefiles/output/guidance.smv and b/tests/examplefiles/output/guidance.smv differ
diff --git a/tests/examplefiles/output/hash_syntax.rb b/tests/examplefiles/output/hash_syntax.rb
index 1fdff2b..1ef6ff9 100644
Binary files a/tests/examplefiles/output/hash_syntax.rb and b/tests/examplefiles/output/hash_syntax.rb differ
diff --git a/tests/examplefiles/output/hello-world.puzzlet.aheui b/tests/examplefiles/output/hello-world.puzzlet.aheui
index 84ae062..d28a9af 100644
Binary files a/tests/examplefiles/output/hello-world.puzzlet.aheui and b/tests/examplefiles/output/hello-world.puzzlet.aheui differ
diff --git a/tests/examplefiles/output/hello.at b/tests/examplefiles/output/hello.at
index b02f8c3..0621fc2 100644
Binary files a/tests/examplefiles/output/hello.at and b/tests/examplefiles/output/hello.at differ
diff --git a/tests/examplefiles/output/hello.golo b/tests/examplefiles/output/hello.golo
index 5fc7f82..364f0aa 100644
Binary files a/tests/examplefiles/output/hello.golo and b/tests/examplefiles/output/hello.golo differ
diff --git a/tests/examplefiles/output/hello.lsl b/tests/examplefiles/output/hello.lsl
index 6be07ef..bfd0338 100644
Binary files a/tests/examplefiles/output/hello.lsl and b/tests/examplefiles/output/hello.lsl differ
diff --git a/tests/examplefiles/output/hello.smali b/tests/examplefiles/output/hello.smali
index c2893fc..b5377c1 100644
Binary files a/tests/examplefiles/output/hello.smali and b/tests/examplefiles/output/hello.smali differ
diff --git a/tests/examplefiles/output/hello.sp b/tests/examplefiles/output/hello.sp
index 0f237a9..c15daf1 100644
Binary files a/tests/examplefiles/output/hello.sp and b/tests/examplefiles/output/hello.sp differ
diff --git a/tests/examplefiles/output/hexdump_debugexe b/tests/examplefiles/output/hexdump_debugexe
index 3837dfd..ce463d6 100644
Binary files a/tests/examplefiles/output/hexdump_debugexe and b/tests/examplefiles/output/hexdump_debugexe differ
diff --git a/tests/examplefiles/output/hexdump_hd b/tests/examplefiles/output/hexdump_hd
index 362a658..77a7d57 100644
Binary files a/tests/examplefiles/output/hexdump_hd and b/tests/examplefiles/output/hexdump_hd differ
diff --git a/tests/examplefiles/output/hexdump_hexcat b/tests/examplefiles/output/hexdump_hexcat
index 6c697d4..19a8212 100644
Binary files a/tests/examplefiles/output/hexdump_hexcat and b/tests/examplefiles/output/hexdump_hexcat differ
diff --git a/tests/examplefiles/output/hexdump_hexdump b/tests/examplefiles/output/hexdump_hexdump
index 33866aa..1bf44bc 100644
Binary files a/tests/examplefiles/output/hexdump_hexdump and b/tests/examplefiles/output/hexdump_hexdump differ
diff --git a/tests/examplefiles/output/hexdump_od b/tests/examplefiles/output/hexdump_od
index 63cf7cf..eb3fc24 100644
Binary files a/tests/examplefiles/output/hexdump_od and b/tests/examplefiles/output/hexdump_od differ
diff --git a/tests/examplefiles/output/hexdump_xxd b/tests/examplefiles/output/hexdump_xxd
index caf1ea8..8ac0e59 100644
Binary files a/tests/examplefiles/output/hexdump_xxd and b/tests/examplefiles/output/hexdump_xxd differ
diff --git a/tests/examplefiles/output/html+php_faulty.php b/tests/examplefiles/output/html+php_faulty.php
index 81ff970..ed09128 100644
Binary files a/tests/examplefiles/output/html+php_faulty.php and b/tests/examplefiles/output/html+php_faulty.php differ
diff --git a/tests/examplefiles/output/http_request_example b/tests/examplefiles/output/http_request_example
index ab7924a..999c58a 100644
Binary files a/tests/examplefiles/output/http_request_example and b/tests/examplefiles/output/http_request_example differ
diff --git a/tests/examplefiles/output/http_response_example b/tests/examplefiles/output/http_response_example
index 516e8c2..20c7d5f 100644
Binary files a/tests/examplefiles/output/http_response_example and b/tests/examplefiles/output/http_response_example differ
diff --git a/tests/examplefiles/output/hybris_File.hy b/tests/examplefiles/output/hybris_File.hy
index 18689de..ce2084e 100644
Binary files a/tests/examplefiles/output/hybris_File.hy and b/tests/examplefiles/output/hybris_File.hy differ
diff --git a/tests/examplefiles/output/idl_sample.pro b/tests/examplefiles/output/idl_sample.pro
index fa62ff7..9981b44 100644
Binary files a/tests/examplefiles/output/idl_sample.pro and b/tests/examplefiles/output/idl_sample.pro differ
diff --git a/tests/examplefiles/output/iex_example b/tests/examplefiles/output/iex_example
index 9c1290f..4410273 100644
Binary files a/tests/examplefiles/output/iex_example and b/tests/examplefiles/output/iex_example differ
diff --git a/tests/examplefiles/output/inet_pton6.dg b/tests/examplefiles/output/inet_pton6.dg
index 50b218d..e3b07a1 100644
Binary files a/tests/examplefiles/output/inet_pton6.dg and b/tests/examplefiles/output/inet_pton6.dg differ
diff --git a/tests/examplefiles/output/inform6_example b/tests/examplefiles/output/inform6_example
index cbfe4b9..e05a432 100644
Binary files a/tests/examplefiles/output/inform6_example and b/tests/examplefiles/output/inform6_example differ
diff --git a/tests/examplefiles/output/interp.scala b/tests/examplefiles/output/interp.scala
index 5329718..5d3021b 100644
Binary files a/tests/examplefiles/output/interp.scala and b/tests/examplefiles/output/interp.scala differ
diff --git a/tests/examplefiles/output/intro.ik b/tests/examplefiles/output/intro.ik
index 8c8dcd3..a8ac3e6 100644
Binary files a/tests/examplefiles/output/intro.ik and b/tests/examplefiles/output/intro.ik differ
diff --git a/tests/examplefiles/output/ints.php b/tests/examplefiles/output/ints.php
index 61732ea..e107f79 100644
Binary files a/tests/examplefiles/output/ints.php and b/tests/examplefiles/output/ints.php differ
diff --git a/tests/examplefiles/output/intsyn.fun b/tests/examplefiles/output/intsyn.fun
index cfdc539..eb03921 100644
Binary files a/tests/examplefiles/output/intsyn.fun and b/tests/examplefiles/output/intsyn.fun differ
diff --git a/tests/examplefiles/output/intsyn.sig b/tests/examplefiles/output/intsyn.sig
index 16915b6..586fc68 100644
Binary files a/tests/examplefiles/output/intsyn.sig and b/tests/examplefiles/output/intsyn.sig differ
diff --git a/tests/examplefiles/output/irb_heredoc b/tests/examplefiles/output/irb_heredoc
index a6ecdd1..51bd8bc 100644
Binary files a/tests/examplefiles/output/irb_heredoc and b/tests/examplefiles/output/irb_heredoc differ
diff --git a/tests/examplefiles/output/irc.lsp b/tests/examplefiles/output/irc.lsp
index c399034..8458c78 100644
Binary files a/tests/examplefiles/output/irc.lsp and b/tests/examplefiles/output/irc.lsp differ
diff --git a/tests/examplefiles/output/java.properties b/tests/examplefiles/output/java.properties
index 1eaaee4..b84d422 100644
Binary files a/tests/examplefiles/output/java.properties and b/tests/examplefiles/output/java.properties differ
diff --git a/tests/examplefiles/output/jbst_example1.jbst b/tests/examplefiles/output/jbst_example1.jbst
index 7b2777f..54d7efb 100644
Binary files a/tests/examplefiles/output/jbst_example1.jbst and b/tests/examplefiles/output/jbst_example1.jbst differ
diff --git a/tests/examplefiles/output/jbst_example2.jbst b/tests/examplefiles/output/jbst_example2.jbst
index 83f992a..1dc8630 100644
Binary files a/tests/examplefiles/output/jbst_example2.jbst and b/tests/examplefiles/output/jbst_example2.jbst differ
diff --git a/tests/examplefiles/output/jinjadesignerdoc.rst b/tests/examplefiles/output/jinjadesignerdoc.rst
index 0fa440f..007d484 100644
Binary files a/tests/examplefiles/output/jinjadesignerdoc.rst and b/tests/examplefiles/output/jinjadesignerdoc.rst differ
diff --git a/tests/examplefiles/output/json.lasso b/tests/examplefiles/output/json.lasso
index 1c0362d..28ed79b 100644
Binary files a/tests/examplefiles/output/json.lasso and b/tests/examplefiles/output/json.lasso differ
diff --git a/tests/examplefiles/output/json.lasso9 b/tests/examplefiles/output/json.lasso9
index f2e8a4a..7b11b1a 100644
Binary files a/tests/examplefiles/output/json.lasso9 and b/tests/examplefiles/output/json.lasso9 differ
diff --git a/tests/examplefiles/output/language.hy b/tests/examplefiles/output/language.hy
index 7dc9bfe..4dc4da8 100644
Binary files a/tests/examplefiles/output/language.hy and b/tests/examplefiles/output/language.hy differ
diff --git a/tests/examplefiles/output/lighttpd_config.conf b/tests/examplefiles/output/lighttpd_config.conf
index 56cac7a..66a8cf9 100644
Binary files a/tests/examplefiles/output/lighttpd_config.conf and b/tests/examplefiles/output/lighttpd_config.conf differ
diff --git a/tests/examplefiles/output/limbo.b b/tests/examplefiles/output/limbo.b
index 1af3da6..6300d98 100644
Binary files a/tests/examplefiles/output/limbo.b and b/tests/examplefiles/output/limbo.b differ
diff --git a/tests/examplefiles/output/linecontinuation.py b/tests/examplefiles/output/linecontinuation.py
index d6adfed..a8f30ab 100644
Binary files a/tests/examplefiles/output/linecontinuation.py and b/tests/examplefiles/output/linecontinuation.py differ
diff --git a/tests/examplefiles/output/livescript-demo.ls b/tests/examplefiles/output/livescript-demo.ls
index f762b9e..f8d49c8 100644
Binary files a/tests/examplefiles/output/livescript-demo.ls and b/tests/examplefiles/output/livescript-demo.ls differ
diff --git a/tests/examplefiles/output/logos_example.xm b/tests/examplefiles/output/logos_example.xm
index 1822f82..62fde57 100644
Binary files a/tests/examplefiles/output/logos_example.xm and b/tests/examplefiles/output/logos_example.xm differ
diff --git a/tests/examplefiles/output/ltmain.sh b/tests/examplefiles/output/ltmain.sh
index fad8880..b5f4b04 100644
Binary files a/tests/examplefiles/output/ltmain.sh and b/tests/examplefiles/output/ltmain.sh differ
diff --git a/tests/examplefiles/output/main.cmake b/tests/examplefiles/output/main.cmake
index b6c156f..c75c835 100644
Binary files a/tests/examplefiles/output/main.cmake and b/tests/examplefiles/output/main.cmake differ
diff --git a/tests/examplefiles/output/markdown.lsp b/tests/examplefiles/output/markdown.lsp
index 7476e4f..691508d 100644
Binary files a/tests/examplefiles/output/markdown.lsp and b/tests/examplefiles/output/markdown.lsp differ
diff --git a/tests/examplefiles/output/matlab_noreturn b/tests/examplefiles/output/matlab_noreturn
index f5e60c6..335cdc7 100644
Binary files a/tests/examplefiles/output/matlab_noreturn and b/tests/examplefiles/output/matlab_noreturn differ
diff --git a/tests/examplefiles/output/matlab_sample b/tests/examplefiles/output/matlab_sample
index ef164b5..25f67ef 100644
Binary files a/tests/examplefiles/output/matlab_sample and b/tests/examplefiles/output/matlab_sample differ
diff --git a/tests/examplefiles/output/matlabsession_sample.txt b/tests/examplefiles/output/matlabsession_sample.txt
index 3000a93..363a5e2 100644
Binary files a/tests/examplefiles/output/matlabsession_sample.txt and b/tests/examplefiles/output/matlabsession_sample.txt differ
diff --git a/tests/examplefiles/output/metagrammar.treetop b/tests/examplefiles/output/metagrammar.treetop
index 036170b..3bfee89 100644
Binary files a/tests/examplefiles/output/metagrammar.treetop and b/tests/examplefiles/output/metagrammar.treetop differ
diff --git a/tests/examplefiles/output/minehunt.qml b/tests/examplefiles/output/minehunt.qml
index e33bb48..78d0c74 100644
Binary files a/tests/examplefiles/output/minehunt.qml and b/tests/examplefiles/output/minehunt.qml differ
diff --git a/tests/examplefiles/output/minimal.ns2 b/tests/examplefiles/output/minimal.ns2
index 388ebae..e903271 100644
Binary files a/tests/examplefiles/output/minimal.ns2 and b/tests/examplefiles/output/minimal.ns2 differ
diff --git a/tests/examplefiles/output/modula2_test_cases.def b/tests/examplefiles/output/modula2_test_cases.def
index d2c677e..068fc30 100644
Binary files a/tests/examplefiles/output/modula2_test_cases.def and b/tests/examplefiles/output/modula2_test_cases.def differ
diff --git a/tests/examplefiles/output/moin_SyntaxReference.txt b/tests/examplefiles/output/moin_SyntaxReference.txt
index a05647f..db0440e 100644
Binary files a/tests/examplefiles/output/moin_SyntaxReference.txt and b/tests/examplefiles/output/moin_SyntaxReference.txt differ
diff --git a/tests/examplefiles/output/multiline_regexes.rb b/tests/examplefiles/output/multiline_regexes.rb
index 3e8f95a..59fe086 100644
Binary files a/tests/examplefiles/output/multiline_regexes.rb and b/tests/examplefiles/output/multiline_regexes.rb differ
diff --git a/tests/examplefiles/output/nanomsg.intr b/tests/examplefiles/output/nanomsg.intr
index abf262b..cc5a022 100644
Binary files a/tests/examplefiles/output/nanomsg.intr and b/tests/examplefiles/output/nanomsg.intr differ
diff --git a/tests/examplefiles/output/nasm_aoutso.asm b/tests/examplefiles/output/nasm_aoutso.asm
index 1bde851..00ac80b 100644
Binary files a/tests/examplefiles/output/nasm_aoutso.asm and b/tests/examplefiles/output/nasm_aoutso.asm differ
diff --git a/tests/examplefiles/output/nasm_objexe.asm b/tests/examplefiles/output/nasm_objexe.asm
index 6f41096..39383f9 100644
Binary files a/tests/examplefiles/output/nasm_objexe.asm and b/tests/examplefiles/output/nasm_objexe.asm differ
diff --git a/tests/examplefiles/output/nemerle_sample.n b/tests/examplefiles/output/nemerle_sample.n
index 45ec4b9..005119e 100644
Binary files a/tests/examplefiles/output/nemerle_sample.n and b/tests/examplefiles/output/nemerle_sample.n differ
diff --git a/tests/examplefiles/output/nginx_nginx.conf b/tests/examplefiles/output/nginx_nginx.conf
index 1e59c30..4a9c50e 100644
Binary files a/tests/examplefiles/output/nginx_nginx.conf and b/tests/examplefiles/output/nginx_nginx.conf differ
diff --git a/tests/examplefiles/output/noexcept.cpp b/tests/examplefiles/output/noexcept.cpp
index 46b5a01..8129edf 100644
Binary files a/tests/examplefiles/output/noexcept.cpp and b/tests/examplefiles/output/noexcept.cpp differ
diff --git a/tests/examplefiles/output/numbers.c b/tests/examplefiles/output/numbers.c
index 3600ed8..dc5732c 100644
Binary files a/tests/examplefiles/output/numbers.c and b/tests/examplefiles/output/numbers.c differ
diff --git a/tests/examplefiles/output/objc_example.m b/tests/examplefiles/output/objc_example.m
index d2decc6..7f3e27b 100644
Binary files a/tests/examplefiles/output/objc_example.m and b/tests/examplefiles/output/objc_example.m differ
diff --git a/tests/examplefiles/output/openedge_example b/tests/examplefiles/output/openedge_example
index 33285f9..db195c5 100644
Binary files a/tests/examplefiles/output/openedge_example and b/tests/examplefiles/output/openedge_example differ
diff --git a/tests/examplefiles/output/pacman.conf b/tests/examplefiles/output/pacman.conf
index eaacbab..aba71e6 100644
Binary files a/tests/examplefiles/output/pacman.conf and b/tests/examplefiles/output/pacman.conf differ
diff --git a/tests/examplefiles/output/pacman.ijs b/tests/examplefiles/output/pacman.ijs
index 0ba63b9..b05c97e 100644
Binary files a/tests/examplefiles/output/pacman.ijs and b/tests/examplefiles/output/pacman.ijs differ
diff --git a/tests/examplefiles/output/pawn_example b/tests/examplefiles/output/pawn_example
index 4283a0a..c463225 100644
Binary files a/tests/examplefiles/output/pawn_example and b/tests/examplefiles/output/pawn_example differ
diff --git a/tests/examplefiles/output/perl_misc b/tests/examplefiles/output/perl_misc
index f1e4754..b7b0f19 100644
Binary files a/tests/examplefiles/output/perl_misc and b/tests/examplefiles/output/perl_misc differ
diff --git a/tests/examplefiles/output/perl_perl5db b/tests/examplefiles/output/perl_perl5db
index b68bd7b..9fa2a8c 100644
Binary files a/tests/examplefiles/output/perl_perl5db and b/tests/examplefiles/output/perl_perl5db differ
diff --git a/tests/examplefiles/output/perl_regex-delims b/tests/examplefiles/output/perl_regex-delims
index 68b3690..bc079d1 100644
Binary files a/tests/examplefiles/output/perl_regex-delims and b/tests/examplefiles/output/perl_regex-delims differ
diff --git a/tests/examplefiles/output/perlfunc.1 b/tests/examplefiles/output/perlfunc.1
index a35d2e9..fb06127 100644
Binary files a/tests/examplefiles/output/perlfunc.1 and b/tests/examplefiles/output/perlfunc.1 differ
diff --git a/tests/examplefiles/output/phpMyAdmin.spec b/tests/examplefiles/output/phpMyAdmin.spec
index 1c0974d..c84a98f 100644
Binary files a/tests/examplefiles/output/phpMyAdmin.spec and b/tests/examplefiles/output/phpMyAdmin.spec differ
diff --git a/tests/examplefiles/output/phpcomplete.vim b/tests/examplefiles/output/phpcomplete.vim
index 4b7b856..dc886d8 100644
Binary files a/tests/examplefiles/output/phpcomplete.vim and b/tests/examplefiles/output/phpcomplete.vim differ
diff --git a/tests/examplefiles/output/pkgconfig_example.pc b/tests/examplefiles/output/pkgconfig_example.pc
index 53f85f9..a0fe961 100644
Binary files a/tests/examplefiles/output/pkgconfig_example.pc and b/tests/examplefiles/output/pkgconfig_example.pc differ
diff --git a/tests/examplefiles/output/plain.bst b/tests/examplefiles/output/plain.bst
index 0695399..68a6a39 100644
Binary files a/tests/examplefiles/output/plain.bst and b/tests/examplefiles/output/plain.bst differ
diff --git a/tests/examplefiles/output/pleac.in.rb b/tests/examplefiles/output/pleac.in.rb
index e2083cc..c2cb494 100644
Binary files a/tests/examplefiles/output/pleac.in.rb and b/tests/examplefiles/output/pleac.in.rb differ
diff --git a/tests/examplefiles/output/postgresql_test.txt b/tests/examplefiles/output/postgresql_test.txt
index 47b20d6..0a7c7b3 100644
Binary files a/tests/examplefiles/output/postgresql_test.txt and b/tests/examplefiles/output/postgresql_test.txt differ
diff --git a/tests/examplefiles/output/pppoe.applescript b/tests/examplefiles/output/pppoe.applescript
index 763d060..994cfc2 100644
Binary files a/tests/examplefiles/output/pppoe.applescript and b/tests/examplefiles/output/pppoe.applescript differ
diff --git a/tests/examplefiles/output/psql_session.txt b/tests/examplefiles/output/psql_session.txt
index a7e7950..7d691ad 100644
Binary files a/tests/examplefiles/output/psql_session.txt and b/tests/examplefiles/output/psql_session.txt differ
diff --git a/tests/examplefiles/output/py3_test.txt b/tests/examplefiles/output/py3_test.txt
index 1ce2dca..5305ec3 100644
Binary files a/tests/examplefiles/output/py3_test.txt and b/tests/examplefiles/output/py3_test.txt differ
diff --git a/tests/examplefiles/output/py3tb_test.py3tb b/tests/examplefiles/output/py3tb_test.py3tb
index c574926..446462c 100644
Binary files a/tests/examplefiles/output/py3tb_test.py3tb and b/tests/examplefiles/output/py3tb_test.py3tb differ
diff --git a/tests/examplefiles/output/pycon_ctrlc_traceback b/tests/examplefiles/output/pycon_ctrlc_traceback
index f259fb4..82c04eb 100644
Binary files a/tests/examplefiles/output/pycon_ctrlc_traceback and b/tests/examplefiles/output/pycon_ctrlc_traceback differ
diff --git a/tests/examplefiles/output/pycon_test.pycon b/tests/examplefiles/output/pycon_test.pycon
index 879bc06..ba015f2 100644
Binary files a/tests/examplefiles/output/pycon_test.pycon and b/tests/examplefiles/output/pycon_test.pycon differ
diff --git a/tests/examplefiles/output/pytb_test2.pytb b/tests/examplefiles/output/pytb_test2.pytb
index c29ddea..86480a8 100644
Binary files a/tests/examplefiles/output/pytb_test2.pytb and b/tests/examplefiles/output/pytb_test2.pytb differ
diff --git a/tests/examplefiles/output/pytb_test3.pytb b/tests/examplefiles/output/pytb_test3.pytb
index 1bab41a..230bb43 100644
Binary files a/tests/examplefiles/output/pytb_test3.pytb and b/tests/examplefiles/output/pytb_test3.pytb differ
diff --git a/tests/examplefiles/output/python25-bsd.mak b/tests/examplefiles/output/python25-bsd.mak
index 22c516e..dcb66f5 100644
Binary files a/tests/examplefiles/output/python25-bsd.mak and b/tests/examplefiles/output/python25-bsd.mak differ
diff --git a/tests/examplefiles/output/qbasic_example b/tests/examplefiles/output/qbasic_example
index 4ba4d5b..89b75e4 100644
Binary files a/tests/examplefiles/output/qbasic_example and b/tests/examplefiles/output/qbasic_example differ
diff --git a/tests/examplefiles/output/qsort.prolog b/tests/examplefiles/output/qsort.prolog
index 9d431e1..ab43f8a 100644
Binary files a/tests/examplefiles/output/qsort.prolog and b/tests/examplefiles/output/qsort.prolog differ
diff --git a/tests/examplefiles/output/r-console-transcript.Rout b/tests/examplefiles/output/r-console-transcript.Rout
index 0bdd4db..8326a99 100644
Binary files a/tests/examplefiles/output/r-console-transcript.Rout and b/tests/examplefiles/output/r-console-transcript.Rout differ
diff --git a/tests/examplefiles/output/r6rs-comments.scm b/tests/examplefiles/output/r6rs-comments.scm
index c241f5f..cc8b738 100644
Binary files a/tests/examplefiles/output/r6rs-comments.scm and b/tests/examplefiles/output/r6rs-comments.scm differ
diff --git a/tests/examplefiles/output/ragel-cpp_rlscan b/tests/examplefiles/output/ragel-cpp_rlscan
index 3f1ada9..41089ee 100644
Binary files a/tests/examplefiles/output/ragel-cpp_rlscan and b/tests/examplefiles/output/ragel-cpp_rlscan differ
diff --git a/tests/examplefiles/output/ragel-cpp_snippet b/tests/examplefiles/output/ragel-cpp_snippet
index c4bd383..a53cff9 100644
Binary files a/tests/examplefiles/output/ragel-cpp_snippet and b/tests/examplefiles/output/ragel-cpp_snippet differ
diff --git a/tests/examplefiles/output/regex.js b/tests/examplefiles/output/regex.js
index 58c2369..b762e04 100644
Binary files a/tests/examplefiles/output/regex.js and b/tests/examplefiles/output/regex.js differ
diff --git a/tests/examplefiles/output/resourcebundle_demo b/tests/examplefiles/output/resourcebundle_demo
index 1364560..48d65de 100644
Binary files a/tests/examplefiles/output/resourcebundle_demo and b/tests/examplefiles/output/resourcebundle_demo differ
diff --git a/tests/examplefiles/output/reversi.lsp b/tests/examplefiles/output/reversi.lsp
index 63b448d..4d16429 100644
Binary files a/tests/examplefiles/output/reversi.lsp and b/tests/examplefiles/output/reversi.lsp differ
diff --git a/tests/examplefiles/output/rnc_example.rnc b/tests/examplefiles/output/rnc_example.rnc
index 8dc1f9c..4fd2e83 100644
Binary files a/tests/examplefiles/output/rnc_example.rnc and b/tests/examplefiles/output/rnc_example.rnc differ
diff --git a/tests/examplefiles/output/roboconf.graph b/tests/examplefiles/output/roboconf.graph
index 74e372c..f73d5f6 100644
Binary files a/tests/examplefiles/output/roboconf.graph and b/tests/examplefiles/output/roboconf.graph differ
diff --git a/tests/examplefiles/output/roboconf.instances b/tests/examplefiles/output/roboconf.instances
index 7512543..5006ad6 100644
Binary files a/tests/examplefiles/output/roboconf.instances and b/tests/examplefiles/output/roboconf.instances differ
diff --git a/tests/examplefiles/output/robotframework_test.txt b/tests/examplefiles/output/robotframework_test.txt
index 08cd588..7f91d80 100644
Binary files a/tests/examplefiles/output/robotframework_test.txt and b/tests/examplefiles/output/robotframework_test.txt differ
diff --git a/tests/examplefiles/output/rql-queries.rql b/tests/examplefiles/output/rql-queries.rql
index b553c97..0875040 100644
Binary files a/tests/examplefiles/output/rql-queries.rql and b/tests/examplefiles/output/rql-queries.rql differ
diff --git a/tests/examplefiles/output/ruby_func_def.rb b/tests/examplefiles/output/ruby_func_def.rb
index 8f91a03..15aef91 100644
Binary files a/tests/examplefiles/output/ruby_func_def.rb and b/tests/examplefiles/output/ruby_func_def.rb differ
diff --git a/tests/examplefiles/output/sample.qvto b/tests/examplefiles/output/sample.qvto
index 9d03863..ba2fed9 100644
Binary files a/tests/examplefiles/output/sample.qvto and b/tests/examplefiles/output/sample.qvto differ
diff --git a/tests/examplefiles/output/scilab.sci b/tests/examplefiles/output/scilab.sci
index bc6ea56..49e031f 100644
Binary files a/tests/examplefiles/output/scilab.sci and b/tests/examplefiles/output/scilab.sci differ
diff --git a/tests/examplefiles/output/scope.cirru b/tests/examplefiles/output/scope.cirru
index 1963e7f..3a21997 100644
Binary files a/tests/examplefiles/output/scope.cirru and b/tests/examplefiles/output/scope.cirru differ
diff --git a/tests/examplefiles/output/session.dylan-console b/tests/examplefiles/output/session.dylan-console
index a6a873d..6f3dc3f 100644
Binary files a/tests/examplefiles/output/session.dylan-console and b/tests/examplefiles/output/session.dylan-console differ
diff --git a/tests/examplefiles/output/sibling.prolog b/tests/examplefiles/output/sibling.prolog
index f5819ac..3dbf7cd 100644
Binary files a/tests/examplefiles/output/sibling.prolog and b/tests/examplefiles/output/sibling.prolog differ
diff --git a/tests/examplefiles/output/simple.camkes b/tests/examplefiles/output/simple.camkes
index fc06cec..36529dd 100644
Binary files a/tests/examplefiles/output/simple.camkes and b/tests/examplefiles/output/simple.camkes differ
diff --git a/tests/examplefiles/output/simple.croc b/tests/examplefiles/output/simple.croc
index 76f7da8..fbe1568 100644
Binary files a/tests/examplefiles/output/simple.croc and b/tests/examplefiles/output/simple.croc differ
diff --git a/tests/examplefiles/output/smarty_example.html b/tests/examplefiles/output/smarty_example.html
index dbe030c..6c00cf0 100644
Binary files a/tests/examplefiles/output/smarty_example.html and b/tests/examplefiles/output/smarty_example.html differ
diff --git a/tests/examplefiles/output/source.lgt b/tests/examplefiles/output/source.lgt
index c80d90d..ca88dba 100644
Binary files a/tests/examplefiles/output/source.lgt and b/tests/examplefiles/output/source.lgt differ
diff --git a/tests/examplefiles/output/sources.list b/tests/examplefiles/output/sources.list
index 2659a31..5b6837f 100644
Binary files a/tests/examplefiles/output/sources.list and b/tests/examplefiles/output/sources.list differ
diff --git a/tests/examplefiles/output/sparql.rq b/tests/examplefiles/output/sparql.rq
index 3b708cc..1deea7c 100644
Binary files a/tests/examplefiles/output/sparql.rq and b/tests/examplefiles/output/sparql.rq differ
diff --git a/tests/examplefiles/output/sphere.pov b/tests/examplefiles/output/sphere.pov
index 64339f0..3ea403b 100644
Binary files a/tests/examplefiles/output/sphere.pov and b/tests/examplefiles/output/sphere.pov differ
diff --git a/tests/examplefiles/output/sqlite3.sqlite3-console b/tests/examplefiles/output/sqlite3.sqlite3-console
index 45bc178..590788a 100644
Binary files a/tests/examplefiles/output/sqlite3.sqlite3-console and b/tests/examplefiles/output/sqlite3.sqlite3-console differ
diff --git a/tests/examplefiles/output/squid.conf b/tests/examplefiles/output/squid.conf
index 931ddc0..875617f 100644
Binary files a/tests/examplefiles/output/squid.conf and b/tests/examplefiles/output/squid.conf differ
diff --git a/tests/examplefiles/output/string.jl b/tests/examplefiles/output/string.jl
index a636603..ea000cd 100644
Binary files a/tests/examplefiles/output/string.jl and b/tests/examplefiles/output/string.jl differ
diff --git a/tests/examplefiles/output/string_delimiters.d b/tests/examplefiles/output/string_delimiters.d
index 52a71d8..d1abc36 100644
Binary files a/tests/examplefiles/output/string_delimiters.d and b/tests/examplefiles/output/string_delimiters.d differ
diff --git a/tests/examplefiles/output/stripheredoc.sh b/tests/examplefiles/output/stripheredoc.sh
index 30b92cf..f5e399b 100644
Binary files a/tests/examplefiles/output/stripheredoc.sh and b/tests/examplefiles/output/stripheredoc.sh differ
diff --git a/tests/examplefiles/output/subr.el b/tests/examplefiles/output/subr.el
index 14e49a0..0214f0e 100644
Binary files a/tests/examplefiles/output/subr.el and b/tests/examplefiles/output/subr.el differ
diff --git a/tests/examplefiles/output/swig_java.swg b/tests/examplefiles/output/swig_java.swg
index 09548b6..61526dd 100644
Binary files a/tests/examplefiles/output/swig_java.swg and b/tests/examplefiles/output/swig_java.swg differ
diff --git a/tests/examplefiles/output/swig_std_vector.i b/tests/examplefiles/output/swig_std_vector.i
index 7d6c57d..6c209b8 100644
Binary files a/tests/examplefiles/output/swig_std_vector.i and b/tests/examplefiles/output/swig_std_vector.i differ
diff --git a/tests/examplefiles/output/tads3_example.t b/tests/examplefiles/output/tads3_example.t
index
4bf61cb..4ef0e00 100644 Binary files a/tests/examplefiles/output/tads3_example.t and b/tests/examplefiles/output/tads3_example.t differ diff --git a/tests/examplefiles/output/teraterm.ttl b/tests/examplefiles/output/teraterm.ttl index 9706ff8..bbbb1e1 100644 Binary files a/tests/examplefiles/output/teraterm.ttl and b/tests/examplefiles/output/teraterm.ttl differ diff --git a/tests/examplefiles/output/termcap b/tests/examplefiles/output/termcap index ca2a273..dea0ad0 100644 Binary files a/tests/examplefiles/output/termcap and b/tests/examplefiles/output/termcap differ diff --git a/tests/examplefiles/output/terminfo b/tests/examplefiles/output/terminfo index c8f0428..c3258c9 100644 Binary files a/tests/examplefiles/output/terminfo and b/tests/examplefiles/output/terminfo differ diff --git a/tests/examplefiles/output/test-3.0.xq b/tests/examplefiles/output/test-3.0.xq index be79c94..0aa7909 100644 Binary files a/tests/examplefiles/output/test-3.0.xq and b/tests/examplefiles/output/test-3.0.xq differ diff --git a/tests/examplefiles/output/test-exist-update.xq b/tests/examplefiles/output/test-exist-update.xq index 35b1202..9c4acbb 100644 Binary files a/tests/examplefiles/output/test-exist-update.xq and b/tests/examplefiles/output/test-exist-update.xq differ diff --git a/tests/examplefiles/output/test.R b/tests/examplefiles/output/test.R index d81aa23..ed29937 100644 Binary files a/tests/examplefiles/output/test.R and b/tests/examplefiles/output/test.R differ diff --git a/tests/examplefiles/output/test.adb b/tests/examplefiles/output/test.adb index d3299c0..523bd95 100644 Binary files a/tests/examplefiles/output/test.adb and b/tests/examplefiles/output/test.adb differ diff --git a/tests/examplefiles/output/test.adls b/tests/examplefiles/output/test.adls index bf37577..fdb7d48 100644 Binary files a/tests/examplefiles/output/test.adls and b/tests/examplefiles/output/test.adls differ diff --git a/tests/examplefiles/output/test.agda b/tests/examplefiles/output/test.agda index e579d10..5039ea1 100644 Binary files a/tests/examplefiles/output/test.agda and b/tests/examplefiles/output/test.agda differ diff --git a/tests/examplefiles/output/test.apl b/tests/examplefiles/output/test.apl index 5ab5c49..9ab981b 100644 Binary files a/tests/examplefiles/output/test.apl and b/tests/examplefiles/output/test.apl differ diff --git a/tests/examplefiles/output/test.asy b/tests/examplefiles/output/test.asy index 06b36c2..47e7e96 100644 Binary files a/tests/examplefiles/output/test.asy and b/tests/examplefiles/output/test.asy differ diff --git a/tests/examplefiles/output/test.awk b/tests/examplefiles/output/test.awk index 4a377ea..1dbd6ec 100644 Binary files a/tests/examplefiles/output/test.awk and b/tests/examplefiles/output/test.awk differ diff --git a/tests/examplefiles/output/test.bb b/tests/examplefiles/output/test.bb index 5d704d4..9ab3d1f 100644 Binary files a/tests/examplefiles/output/test.bb and b/tests/examplefiles/output/test.bb differ diff --git a/tests/examplefiles/output/test.bib b/tests/examplefiles/output/test.bib index 980bea0..97131ee 100644 Binary files a/tests/examplefiles/output/test.bib and b/tests/examplefiles/output/test.bib differ diff --git a/tests/examplefiles/output/test.bmx b/tests/examplefiles/output/test.bmx index 422b4e1..4d44322 100644 Binary files a/tests/examplefiles/output/test.bmx and b/tests/examplefiles/output/test.bmx differ diff --git a/tests/examplefiles/output/test.boo b/tests/examplefiles/output/test.boo index 36a5473..aaf2f6e 100644 Binary files 
a/tests/examplefiles/output/test.boo and b/tests/examplefiles/output/test.boo differ diff --git a/tests/examplefiles/output/test.bpl b/tests/examplefiles/output/test.bpl index 78b3d47..38dcd68 100644 Binary files a/tests/examplefiles/output/test.bpl and b/tests/examplefiles/output/test.bpl differ diff --git a/tests/examplefiles/output/test.bro b/tests/examplefiles/output/test.bro index 928eeb6..6f3b10d 100644 Binary files a/tests/examplefiles/output/test.bro and b/tests/examplefiles/output/test.bro differ diff --git a/tests/examplefiles/output/test.cadl b/tests/examplefiles/output/test.cadl index e4bd92a..12f6ec6 100644 Binary files a/tests/examplefiles/output/test.cadl and b/tests/examplefiles/output/test.cadl differ diff --git a/tests/examplefiles/output/test.cr b/tests/examplefiles/output/test.cr index ef5a3be..c7d2769 100644 Binary files a/tests/examplefiles/output/test.cr and b/tests/examplefiles/output/test.cr differ diff --git a/tests/examplefiles/output/test.cs b/tests/examplefiles/output/test.cs index 280d69e..6ec1134 100644 Binary files a/tests/examplefiles/output/test.cs and b/tests/examplefiles/output/test.cs differ diff --git a/tests/examplefiles/output/test.csd b/tests/examplefiles/output/test.csd index c9e0355..bfae434 100644 Binary files a/tests/examplefiles/output/test.csd and b/tests/examplefiles/output/test.csd differ diff --git a/tests/examplefiles/output/test.css b/tests/examplefiles/output/test.css index 440934b..e2f949e 100644 Binary files a/tests/examplefiles/output/test.css and b/tests/examplefiles/output/test.css differ diff --git a/tests/examplefiles/output/test.cu b/tests/examplefiles/output/test.cu index 225c18a..584088c 100644 Binary files a/tests/examplefiles/output/test.cu and b/tests/examplefiles/output/test.cu differ diff --git a/tests/examplefiles/output/test.cyp b/tests/examplefiles/output/test.cyp index 1662e4c..80f8751 100644 Binary files a/tests/examplefiles/output/test.cyp and b/tests/examplefiles/output/test.cyp differ diff --git a/tests/examplefiles/output/test.d b/tests/examplefiles/output/test.d index 7f70a38..be1ca84 100644 Binary files a/tests/examplefiles/output/test.d and b/tests/examplefiles/output/test.d differ diff --git a/tests/examplefiles/output/test.dart b/tests/examplefiles/output/test.dart index 73a279e..37db41e 100644 Binary files a/tests/examplefiles/output/test.dart and b/tests/examplefiles/output/test.dart differ diff --git a/tests/examplefiles/output/test.dtd b/tests/examplefiles/output/test.dtd index 5677e21..da481e4 100644 Binary files a/tests/examplefiles/output/test.dtd and b/tests/examplefiles/output/test.dtd differ diff --git a/tests/examplefiles/output/test.ebnf b/tests/examplefiles/output/test.ebnf index a3e57e4..1b3eb3d 100644 Binary files a/tests/examplefiles/output/test.ebnf and b/tests/examplefiles/output/test.ebnf differ diff --git a/tests/examplefiles/output/test.ec b/tests/examplefiles/output/test.ec index 51672e3..aa1768c 100644 Binary files a/tests/examplefiles/output/test.ec and b/tests/examplefiles/output/test.ec differ diff --git a/tests/examplefiles/output/test.eh b/tests/examplefiles/output/test.eh index 4e9270a..b0042d0 100644 Binary files a/tests/examplefiles/output/test.eh and b/tests/examplefiles/output/test.eh differ diff --git a/tests/examplefiles/output/test.erl b/tests/examplefiles/output/test.erl index 1d9582a..bfe0cac 100644 Binary files a/tests/examplefiles/output/test.erl and b/tests/examplefiles/output/test.erl differ diff --git a/tests/examplefiles/output/test.escript 
b/tests/examplefiles/output/test.escript index 541ecb7..735d112 100644 Binary files a/tests/examplefiles/output/test.escript and b/tests/examplefiles/output/test.escript differ diff --git a/tests/examplefiles/output/test.evoque b/tests/examplefiles/output/test.evoque index c934db1..118dd6f 100644 Binary files a/tests/examplefiles/output/test.evoque and b/tests/examplefiles/output/test.evoque differ diff --git a/tests/examplefiles/output/test.fan b/tests/examplefiles/output/test.fan index fc8f955..1f66daa 100644 Binary files a/tests/examplefiles/output/test.fan and b/tests/examplefiles/output/test.fan differ diff --git a/tests/examplefiles/output/test.flx b/tests/examplefiles/output/test.flx index b2f2459..ac942fb 100644 Binary files a/tests/examplefiles/output/test.flx and b/tests/examplefiles/output/test.flx differ diff --git a/tests/examplefiles/output/test.gdc b/tests/examplefiles/output/test.gdc index 72f825d..4d36036 100644 Binary files a/tests/examplefiles/output/test.gdc and b/tests/examplefiles/output/test.gdc differ diff --git a/tests/examplefiles/output/test.gradle b/tests/examplefiles/output/test.gradle index 15081ac..c495304 100644 Binary files a/tests/examplefiles/output/test.gradle and b/tests/examplefiles/output/test.gradle differ diff --git a/tests/examplefiles/output/test.groovy b/tests/examplefiles/output/test.groovy index 5f37317..6c8581d 100644 Binary files a/tests/examplefiles/output/test.groovy and b/tests/examplefiles/output/test.groovy differ diff --git a/tests/examplefiles/output/test.hsail b/tests/examplefiles/output/test.hsail index b864987..6fb2282 100644 Binary files a/tests/examplefiles/output/test.hsail and b/tests/examplefiles/output/test.hsail differ diff --git a/tests/examplefiles/output/test.html b/tests/examplefiles/output/test.html index 3ca49c7..811383a 100644 Binary files a/tests/examplefiles/output/test.html and b/tests/examplefiles/output/test.html differ diff --git a/tests/examplefiles/output/test.idr b/tests/examplefiles/output/test.idr index 6868b81..590e976 100644 Binary files a/tests/examplefiles/output/test.idr and b/tests/examplefiles/output/test.idr differ diff --git a/tests/examplefiles/output/test.ini b/tests/examplefiles/output/test.ini index 1c1b69d..27f3d4a 100644 Binary files a/tests/examplefiles/output/test.ini and b/tests/examplefiles/output/test.ini differ diff --git a/tests/examplefiles/output/test.java b/tests/examplefiles/output/test.java index 2165f58..758717e 100644 Binary files a/tests/examplefiles/output/test.java and b/tests/examplefiles/output/test.java differ diff --git a/tests/examplefiles/output/test.jsp b/tests/examplefiles/output/test.jsp index 7591b24..2956444 100644 Binary files a/tests/examplefiles/output/test.jsp and b/tests/examplefiles/output/test.jsp differ diff --git a/tests/examplefiles/output/test.lean b/tests/examplefiles/output/test.lean index ed92339..5fdafbc 100644 Binary files a/tests/examplefiles/output/test.lean and b/tests/examplefiles/output/test.lean differ diff --git a/tests/examplefiles/output/test.maql b/tests/examplefiles/output/test.maql index 02d0782..2865795 100644 Binary files a/tests/examplefiles/output/test.maql and b/tests/examplefiles/output/test.maql differ diff --git a/tests/examplefiles/output/test.mask b/tests/examplefiles/output/test.mask index 2d01e3a..feff18b 100644 Binary files a/tests/examplefiles/output/test.mask and b/tests/examplefiles/output/test.mask differ diff --git a/tests/examplefiles/output/test.mod b/tests/examplefiles/output/test.mod index 88abc6e..2cb3147 100644 
Binary files a/tests/examplefiles/output/test.mod and b/tests/examplefiles/output/test.mod differ diff --git a/tests/examplefiles/output/test.moo b/tests/examplefiles/output/test.moo index db439a6..f6c5148 100644 Binary files a/tests/examplefiles/output/test.moo and b/tests/examplefiles/output/test.moo differ diff --git a/tests/examplefiles/output/test.mt b/tests/examplefiles/output/test.mt index fef50b0..c62ee87 100644 Binary files a/tests/examplefiles/output/test.mt and b/tests/examplefiles/output/test.mt differ diff --git a/tests/examplefiles/output/test.myt b/tests/examplefiles/output/test.myt index 41a10bc..778b932 100644 Binary files a/tests/examplefiles/output/test.myt and b/tests/examplefiles/output/test.myt differ diff --git a/tests/examplefiles/output/test.ncl b/tests/examplefiles/output/test.ncl index 13ac2f0..291af18 100644 Binary files a/tests/examplefiles/output/test.ncl and b/tests/examplefiles/output/test.ncl differ diff --git a/tests/examplefiles/output/test.nim b/tests/examplefiles/output/test.nim index 0c4fab3..345892b 100644 Binary files a/tests/examplefiles/output/test.nim and b/tests/examplefiles/output/test.nim differ diff --git a/tests/examplefiles/output/test.odin b/tests/examplefiles/output/test.odin index 6787aa8..4b3ceb3 100644 Binary files a/tests/examplefiles/output/test.odin and b/tests/examplefiles/output/test.odin differ diff --git a/tests/examplefiles/output/test.opa b/tests/examplefiles/output/test.opa index 25557b0..c943f39 100644 Binary files a/tests/examplefiles/output/test.opa and b/tests/examplefiles/output/test.opa differ diff --git a/tests/examplefiles/output/test.orc b/tests/examplefiles/output/test.orc index ad4e973..a59242f 100644 Binary files a/tests/examplefiles/output/test.orc and b/tests/examplefiles/output/test.orc differ diff --git a/tests/examplefiles/output/test.p6 b/tests/examplefiles/output/test.p6 index da5b628..b30d0ef 100644 Binary files a/tests/examplefiles/output/test.p6 and b/tests/examplefiles/output/test.p6 differ diff --git a/tests/examplefiles/output/test.pan b/tests/examplefiles/output/test.pan index 8e036b9..5a459bd 100644 Binary files a/tests/examplefiles/output/test.pan and b/tests/examplefiles/output/test.pan differ diff --git a/tests/examplefiles/output/test.pas b/tests/examplefiles/output/test.pas index 8ce6070..a737de1 100644 Binary files a/tests/examplefiles/output/test.pas and b/tests/examplefiles/output/test.pas differ diff --git a/tests/examplefiles/output/test.php b/tests/examplefiles/output/test.php index 8e3ad6d..94b28d4 100644 Binary files a/tests/examplefiles/output/test.php and b/tests/examplefiles/output/test.php differ diff --git a/tests/examplefiles/output/test.pig b/tests/examplefiles/output/test.pig index 284aaee..5628c61 100644 Binary files a/tests/examplefiles/output/test.pig and b/tests/examplefiles/output/test.pig differ diff --git a/tests/examplefiles/output/test.plot b/tests/examplefiles/output/test.plot index 389f275..e64cd2d 100644 Binary files a/tests/examplefiles/output/test.plot and b/tests/examplefiles/output/test.plot differ diff --git a/tests/examplefiles/output/test.ps1 b/tests/examplefiles/output/test.ps1 index f89fde2..02dffcc 100644 Binary files a/tests/examplefiles/output/test.ps1 and b/tests/examplefiles/output/test.ps1 differ diff --git a/tests/examplefiles/output/test.psl b/tests/examplefiles/output/test.psl index 4946c6c..99f6753 100644 Binary files a/tests/examplefiles/output/test.psl and b/tests/examplefiles/output/test.psl differ diff --git a/tests/examplefiles/output/test.pwn 
b/tests/examplefiles/output/test.pwn index a3abfe5..4f21640 100644 Binary files a/tests/examplefiles/output/test.pwn and b/tests/examplefiles/output/test.pwn differ diff --git a/tests/examplefiles/output/test.pypylog b/tests/examplefiles/output/test.pypylog index 93a57b3..66f4e18 100644 Binary files a/tests/examplefiles/output/test.pypylog and b/tests/examplefiles/output/test.pypylog differ diff --git a/tests/examplefiles/output/test.r3 b/tests/examplefiles/output/test.r3 index 2178b95..6ec9a03 100644 Binary files a/tests/examplefiles/output/test.r3 and b/tests/examplefiles/output/test.r3 differ diff --git a/tests/examplefiles/output/test.rb b/tests/examplefiles/output/test.rb index d4c195d..dc588b1 100644 Binary files a/tests/examplefiles/output/test.rb and b/tests/examplefiles/output/test.rb differ diff --git a/tests/examplefiles/output/test.rhtml b/tests/examplefiles/output/test.rhtml index eae0eed..e0a103b 100644 Binary files a/tests/examplefiles/output/test.rhtml and b/tests/examplefiles/output/test.rhtml differ diff --git a/tests/examplefiles/output/test.rsl b/tests/examplefiles/output/test.rsl index d84ad39..445a99f 100644 Binary files a/tests/examplefiles/output/test.rsl and b/tests/examplefiles/output/test.rsl differ diff --git a/tests/examplefiles/output/test.scaml b/tests/examplefiles/output/test.scaml index a7378f9..2f7e8a8 100644 Binary files a/tests/examplefiles/output/test.scaml and b/tests/examplefiles/output/test.scaml differ diff --git a/tests/examplefiles/output/test.sco b/tests/examplefiles/output/test.sco index e4d7331..e0fe92b 100644 Binary files a/tests/examplefiles/output/test.sco and b/tests/examplefiles/output/test.sco differ diff --git a/tests/examplefiles/output/test.shen b/tests/examplefiles/output/test.shen index b1f41b5..2b827c6 100644 Binary files a/tests/examplefiles/output/test.shen and b/tests/examplefiles/output/test.shen differ diff --git a/tests/examplefiles/output/test.sil b/tests/examplefiles/output/test.sil index 0c9cd80..32327f9 100644 Binary files a/tests/examplefiles/output/test.sil and b/tests/examplefiles/output/test.sil differ diff --git a/tests/examplefiles/output/test.ssp b/tests/examplefiles/output/test.ssp index 02dca53..cf632f7 100644 Binary files a/tests/examplefiles/output/test.ssp and b/tests/examplefiles/output/test.ssp differ diff --git a/tests/examplefiles/output/test.swift b/tests/examplefiles/output/test.swift index a17dbaa..d48d7f9 100644 Binary files a/tests/examplefiles/output/test.swift and b/tests/examplefiles/output/test.swift differ diff --git a/tests/examplefiles/output/test.tcsh b/tests/examplefiles/output/test.tcsh index 12b3021..ac7b4f6 100644 Binary files a/tests/examplefiles/output/test.tcsh and b/tests/examplefiles/output/test.tcsh differ diff --git a/tests/examplefiles/output/test.vb b/tests/examplefiles/output/test.vb index 986e65c..aefce4e 100644 Binary files a/tests/examplefiles/output/test.vb and b/tests/examplefiles/output/test.vb differ diff --git a/tests/examplefiles/output/test.vhdl b/tests/examplefiles/output/test.vhdl index 50f5c2c..95d99c8 100644 Binary files a/tests/examplefiles/output/test.vhdl and b/tests/examplefiles/output/test.vhdl differ diff --git a/tests/examplefiles/output/test.xqy b/tests/examplefiles/output/test.xqy index 067c970..02cc0a4 100644 Binary files a/tests/examplefiles/output/test.xqy and b/tests/examplefiles/output/test.xqy differ diff --git a/tests/examplefiles/output/test.xsl b/tests/examplefiles/output/test.xsl index 08a65ca..111b5e5 100644 Binary files 
a/tests/examplefiles/output/test.xsl and b/tests/examplefiles/output/test.xsl differ diff --git a/tests/examplefiles/output/test.zep b/tests/examplefiles/output/test.zep index d662520..f610f32 100644 Binary files a/tests/examplefiles/output/test.zep and b/tests/examplefiles/output/test.zep differ diff --git a/tests/examplefiles/output/test2.odin b/tests/examplefiles/output/test2.odin index 7fee853..b282934 100644 Binary files a/tests/examplefiles/output/test2.odin and b/tests/examplefiles/output/test2.odin differ diff --git a/tests/examplefiles/output/test2.pypylog b/tests/examplefiles/output/test2.pypylog index b543399..f117a4b 100644 Binary files a/tests/examplefiles/output/test2.pypylog and b/tests/examplefiles/output/test2.pypylog differ diff --git a/tests/examplefiles/output/test_basic.adls b/tests/examplefiles/output/test_basic.adls index fad2921..9976b2b 100644 Binary files a/tests/examplefiles/output/test_basic.adls and b/tests/examplefiles/output/test_basic.adls differ diff --git a/tests/examplefiles/output/truncated.pytb b/tests/examplefiles/output/truncated.pytb index 92dd9a7..33a6bf9 100644 Binary files a/tests/examplefiles/output/truncated.pytb and b/tests/examplefiles/output/truncated.pytb differ diff --git a/tests/examplefiles/output/tsql_example.sql b/tests/examplefiles/output/tsql_example.sql index 200beb6..06c254a 100644 Binary files a/tests/examplefiles/output/tsql_example.sql and b/tests/examplefiles/output/tsql_example.sql differ diff --git a/tests/examplefiles/output/twig_test b/tests/examplefiles/output/twig_test index 49fe917..9bc293d 100644 Binary files a/tests/examplefiles/output/twig_test and b/tests/examplefiles/output/twig_test differ diff --git a/tests/examplefiles/output/type.lisp b/tests/examplefiles/output/type.lisp index edccdb0..9c70bc2 100644 Binary files a/tests/examplefiles/output/type.lisp and b/tests/examplefiles/output/type.lisp differ diff --git a/tests/examplefiles/output/typescript_example b/tests/examplefiles/output/typescript_example index 6c3dcb7..15674e4 100644 Binary files a/tests/examplefiles/output/typescript_example and b/tests/examplefiles/output/typescript_example differ diff --git a/tests/examplefiles/output/typoscript_example b/tests/examplefiles/output/typoscript_example index 8356faa..4a1b4a4 100644 Binary files a/tests/examplefiles/output/typoscript_example and b/tests/examplefiles/output/typoscript_example differ diff --git a/tests/examplefiles/output/underscore.coffee b/tests/examplefiles/output/underscore.coffee index f65166b..943c677 100644 Binary files a/tests/examplefiles/output/underscore.coffee and b/tests/examplefiles/output/underscore.coffee differ diff --git a/tests/examplefiles/output/unicode.applescript b/tests/examplefiles/output/unicode.applescript index e560c8c..8790c2b 100644 Binary files a/tests/examplefiles/output/unicode.applescript and b/tests/examplefiles/output/unicode.applescript differ diff --git a/tests/examplefiles/output/unicode.go b/tests/examplefiles/output/unicode.go index c1693d9..afae403 100644 Binary files a/tests/examplefiles/output/unicode.go and b/tests/examplefiles/output/unicode.go differ diff --git a/tests/examplefiles/output/unicode.js b/tests/examplefiles/output/unicode.js index 2e883e5..3b9aae7 100644 Binary files a/tests/examplefiles/output/unicode.js and b/tests/examplefiles/output/unicode.js differ diff --git a/tests/examplefiles/output/unicodedoc.py b/tests/examplefiles/output/unicodedoc.py index 8553f56..8a58ea5 100644 Binary files a/tests/examplefiles/output/unicodedoc.py and 
b/tests/examplefiles/output/unicodedoc.py differ diff --git a/tests/examplefiles/output/unix-io.lid b/tests/examplefiles/output/unix-io.lid index 506c862..bbf39c2 100644 Binary files a/tests/examplefiles/output/unix-io.lid and b/tests/examplefiles/output/unix-io.lid differ diff --git a/tests/examplefiles/output/varnish.vcl b/tests/examplefiles/output/varnish.vcl index 0860d1c..13a0047 100644 Binary files a/tests/examplefiles/output/varnish.vcl and b/tests/examplefiles/output/varnish.vcl differ diff --git a/tests/examplefiles/output/vbnet_test.bas b/tests/examplefiles/output/vbnet_test.bas index dc5dfc4..312f868 100644 Binary files a/tests/examplefiles/output/vbnet_test.bas and b/tests/examplefiles/output/vbnet_test.bas differ diff --git a/tests/examplefiles/output/vctreestatus_hg b/tests/examplefiles/output/vctreestatus_hg index f94f4f8..c84a291 100644 Binary files a/tests/examplefiles/output/vctreestatus_hg and b/tests/examplefiles/output/vctreestatus_hg differ diff --git a/tests/examplefiles/output/vimrc b/tests/examplefiles/output/vimrc index fed517f..9cacb7d 100644 Binary files a/tests/examplefiles/output/vimrc and b/tests/examplefiles/output/vimrc differ diff --git a/tests/examplefiles/output/vpath.mk b/tests/examplefiles/output/vpath.mk index 47ab6c1..a3cc840 100644 Binary files a/tests/examplefiles/output/vpath.mk and b/tests/examplefiles/output/vpath.mk differ diff --git a/tests/examplefiles/output/wdiff_example1.wdiff b/tests/examplefiles/output/wdiff_example1.wdiff index add5a61..449d884 100644 Binary files a/tests/examplefiles/output/wdiff_example1.wdiff and b/tests/examplefiles/output/wdiff_example1.wdiff differ diff --git a/tests/examplefiles/output/wdiff_example3.wdiff b/tests/examplefiles/output/wdiff_example3.wdiff index 75d0c19..5398a38 100644 Binary files a/tests/examplefiles/output/wdiff_example3.wdiff and b/tests/examplefiles/output/wdiff_example3.wdiff differ diff --git a/tests/examplefiles/output/webkit-transition.css b/tests/examplefiles/output/webkit-transition.css index 21b2160..4effb47 100644 Binary files a/tests/examplefiles/output/webkit-transition.css and b/tests/examplefiles/output/webkit-transition.css differ diff --git a/tests/examplefiles/output/while.pov b/tests/examplefiles/output/while.pov index 1e4d6e6..ac212a8 100644 Binary files a/tests/examplefiles/output/while.pov and b/tests/examplefiles/output/while.pov differ diff --git a/tests/examplefiles/output/wiki.factor b/tests/examplefiles/output/wiki.factor index d6024c8..734f272 100644 Binary files a/tests/examplefiles/output/wiki.factor and b/tests/examplefiles/output/wiki.factor differ diff --git a/tests/examplefiles/output/xml_example b/tests/examplefiles/output/xml_example index e54407e..eea9021 100644 Binary files a/tests/examplefiles/output/xml_example and b/tests/examplefiles/output/xml_example differ diff --git a/tests/examplefiles/output/xorg.conf b/tests/examplefiles/output/xorg.conf index a76bfdc..012eeb2 100644 Binary files a/tests/examplefiles/output/xorg.conf and b/tests/examplefiles/output/xorg.conf differ diff --git a/tests/examplefiles/output/yahalom.cpsa b/tests/examplefiles/output/yahalom.cpsa index 99d5b29..66a3299 100644 Binary files a/tests/examplefiles/output/yahalom.cpsa and b/tests/examplefiles/output/yahalom.cpsa differ diff --git a/tests/examplefiles/output/zmlrpc.f90 b/tests/examplefiles/output/zmlrpc.f90 index 2b3adf9..1f62183 100644 Binary files a/tests/examplefiles/output/zmlrpc.f90 and b/tests/examplefiles/output/zmlrpc.f90 differ diff --git 
a/tests/examplefiles/scdoc_manual.scd b/tests/examplefiles/scdoc_manual.scd new file mode 100644 index 0000000..65a2b36 --- /dev/null +++ b/tests/examplefiles/scdoc_manual.scd @@ -0,0 +1,197 @@ +scdoc(5) + +# NAME + +scdoc - document format for writing manual pages + +# SYNTAX + +Input files must use the UTF-8 encoding. + +## PREAMBLE + +Each scdoc file must begin with the following preamble: + + *name*(_section_) ["left\_footer" ["center\_header"]] + +*name* is the name of the man page you are writing, and _section_ is the section +you're writing for (see *man*(1) for information on manual sections). + +_left\_footer_ and _center\_header_ are optional arguments which set the text +positioned at those locations in the generated man page, and *must* be +surrounded with double quotes. + +## SECTION HEADERS + +Each section of your man page should begin with something similar to the +following: + + # HEADER NAME + +Subsection headers are also understood - use two hashes. Each header must have +an empty line on either side. + +## PARAGRAPHS + +Begin a new paragraph with an empty line. + +## LINE BREAKS + +Insert a line break by ending a line with \+\+. + +The result looks++ +like this. + +## FORMATTING + +Text can be made *bold* or _underlined_ with asterisks and underscores: \*bold\* +or \_underlined\_. Underscores in the_middle_of_words will be disregarded. + +## INDENTATION + +You may indent lines with tab characters (*\\t*) to indent them by 4 spaces in +the output. Indented lines may not contain headers. + + The result looks something like this. + + You may use multiple lines and most _formatting_. + +Deindent to return to normal, or indent again to increase your indentation +depth. + +## LISTS + +You may start bulleted lists with dashes (-), like so: + +``` +- Item 1 +- Item 2 + - Subitem 1 + - Subitem 2 +- Item 3 +``` + +The result looks like this: + +- Item 1 +- Item 2 + - Subitem 1 + - Subitem 2 +- Item 3 + +You may also extend long entries onto another line by giving it the same indent +level, plus two spaces. They will be rendered as a single list entry. + +``` +- Item 1 is pretty long so let's + break it up onto two lines +- Item 2 is shorter + - But its children can go on + for a while +``` + +- Item 1 is pretty long so let's + break it up onto two lines +- Item 2 is shorter + - But its children can go on + for a while + +## NUMBERED LISTS + +Numbered lists are similar to normal lists, but begin with periods (.) instead +of dashes (-), like so: + +``` +. Item 1 +. Item 2 +. Item 3, + with multiple lines +``` + +. Item 1 +. Item 2 +. Item 3, + with multiple lines + +## TABLES + +To begin a table, add an empty line followed by any number of rows. + +Each line of a table should start with | or : to start a new row or column +respectively (or space to continue the previous cell on multiple lines), +followed by [ or - or ] to align the contents to the left, center, or right, +followed by a space and the contents of that cell. You may use a space instead +of an alignment specifier to inherit the alignment of the same column in the +previous row. + +The first character of the first row is not limited to | and has special +meaning. [ will produce a table with borders around each cell. | will produce a +table with no borders. ] will produce a table with one border around the whole +table. + +To conclude your table, add an empty line after the last row. + +``` +[[ *Foo* +:- _Bar_ +:- +| *Row 1* +: Hello +:] world! +| *Row 2* +: こんにちは +: 世界 + ! 
+``` + +[[ *Foo* +:- _Bar_ +:- +| *Row 1* +: Hello +:] world! +| *Row 2* +: こんにちは +: 世界 + ! + +## LITERAL TEXT + +You may turn off scdoc formatting and output literal text with escape codes and +literal blocks. Inserting a \\ into your source will cause the subsequent symbol +to be treated as a literal and copied directly to the output. You may also make +blocks of literal syntax like so: + +``` +\``` +_This formatting_ will *not* be interpreted by scdoc. +\``` +``` + +These blocks will be indented one level. Note that literal text is shown +literally in the man viewer - that is, it's not a means for inserting your own +roff macros into the output. Note that \\ is still interpreted within literal +blocks, which for example can be useful to output \``` inside of a literal +block. + +## COMMENTS + +Lines beginning with ; and a space are ignored. + +``` +; This is a comment +``` + +# CONVENTIONS + +By convention, all scdoc documents should be hard wrapped at 80 columns. + +# SEE ALSO + +*scdoc*(1) + +# AUTHORS + +Maintained by Drew DeVault . Up-to-date sources can be found at +https://git.sr.ht/~sircmpwn/scdoc and bugs/patches can be submitted by email to +~sircmpwn/public-inbox@lists.sr.ht. diff --git a/tests/examplefiles/test.orc b/tests/examplefiles/test.orc index d113303..1e9eaa8 100644 --- a/tests/examplefiles/test.orc +++ b/tests/examplefiles/test.orc @@ -56,6 +56,7 @@ lua_exec {{ #include/**/"file.udo" #include/**/|file.udo| +#includestr/**/"$MACRO..udo" #ifdef MACRO #else diff --git a/tests/examplefiles/test.sco b/tests/examplefiles/test.sco index d997c1b..cffcfde 100644 --- a/tests/examplefiles/test.sco +++ b/tests/examplefiles/test.sco @@ -20,3 +20,4 @@ n label } } #include "score.sco" +#includestr/**/"$MACRO..sco" diff --git a/tests/examplefiles/test.sol b/tests/examplefiles/test.sol new file mode 100644 index 0000000..f7a6495 --- /dev/null +++ b/tests/examplefiles/test.sol @@ -0,0 +1,74 @@ +pragma solidity ^0.4.20; + +pragma solidity >=0.4.0 <0.7.0; + +// one-line singleline comment + +/* one-line multiline comment */ + +/* + multi-line multiline comment +*/ + +contract ContractName { + + address public publicaddress; + + uint varname1 = 1234; + int varname2 = 0x12abcdEF; + + string astringsingle = 'test "string" value\' single'; + string astringdouble = "test 'string' value\" double"; + + enum State { + NotStarted, + WorkInProgress, + Done + } + State public state; + + struct AStruct { + string name; + uint8 type; + } + + mapping(address => AStruct) registry; + + event Paid(uint256 value); + event Received(uint256 time); + event Withdraw(uint256 value); + + function addRegistry(string _name, uint8 _type) { + AStruct memory newItem = AStruct({ + name: _name, + type: _type + }); + + registry[msg.sender] = newItem; + } + + function getHash(AStruct item) returns(uint) { + return uint(keccak256(item.name, item.type)); + } + + function pay() public payable { + require(msg.sender == astronaut); + state = State.Paid; + Paid(msg.value); + } + + function receive() public { + require(msg.sender == arbiter); + require(state == State.Paid); + state = State.Received; + Received(now); + } + + function withdraw() public { + require(msg.sender == shipper); + require(state == State.Received); + state = State.Withdrawn; + Withdraw(this.balance); + shipper.transfer(this.balance); + } +} diff --git a/tests/examplefiles/test.zeek b/tests/examplefiles/test.zeek new file mode 100644 index 0000000..ee6bad8 --- /dev/null +++ b/tests/examplefiles/test.zeek @@ -0,0 +1,181 @@ +# An example of the Zeek 
scripting language. + +##! A Zeekygen-style summary comment. + +# TODO: just an example of a todo-indicator + +@load base/frameworks/notice + +@if ( F ) +@endif + +module Example; + +export { + + type mycount: count; + + type SimpleEnum: enum { ONE, TWO, THREE }; + + redef enum SimpleEnum += { + + ## A Zeekygen-style comment. + FOUR, + FIVE, ##< A Zeekygen-style comment. + }; + + type SimpleRecord: record { + field1: count; + field2: bool; + } &redef; + + redef record SimpleRecord += { + + field3: string &optional; + + field4: string &default="blah"; + }; + + const init_option: bool = T; + + option runtime_option: bool = F; + + global test_opaque: opaque of md5; + + global test_vector: vector of count; + + global myfunction: function(msg: string, c: count &default=0): count; + + global myhook: hook(tag: string); + + global myevent: event(tag: string); +} + +function myfunction(msg: string, c: count): count + { + print "in myfunction", msg, c; + return 0; + } + +event myevent(msg: string) &priority=1 + { + print "in myevent"; + } + +hook myhook(msg: string) + { + print "in myhook"; + } + +event zeek_init() + { + local b = T; + local s = "\xff\xaf\"and more after the escaped quote"; + local p = /foo|bar\xbe\/and more after the escaped slash/; + local c = 10; + + local sr = SimpleRecord($field1 = 0, $field2 = T, $field3 = "hi"); + + print sr?$field3, sr$field1; + + local myset: set[string] = set("one", "two", "three"); + + add myset["four"]; + delete myset["one"]; + + for ( ms in myset ) + { + print ms is string, s as string; + + print s[1:3]; + + local tern: count = s == "two" ? 2 : 0; + + if ( s !in myset ) + print fmt("error %4.2f: %s", 3.14159, "wtf?"); + } + + switch ( c ) { + case 1: + break; + case 2: + fallthrough; + default: + break; + } + + if ( ! 
b ) + print "here"; + else + print "there"; + + while ( c != 0 ) + { + if ( c >= 5 ) + c += 0; + else if ( c == 8 ) + c -= 0; + + c = c / 1; + c = c / 1; + c = c - 1; + } + + print |myset|; + print ~5; + print 1 & 0xff; + print 2 ^ 5; + + myfunction ("hello function"); + hook myhook("hello hook"); + event myevent("hello event"); + schedule 1sec { myevent("hello scheduled event") }; + + print 0, 7; + print 0xff, 0xdeadbeef; + + print 3.14159; + print 1234.0; + print 1234e0; + print .003E-23; + print .003E+23; + + print 123/udp; + print 8000/tcp; + print 13/icmp; + print 42/unknown; + + print google.com; + print 192.168.50.1; + print 255.255.255.255; + print 0.0.0.0; + + print 10.0.0.0/16; + + print [2001:0db8:85a3:0000:0000:8a2e:0370:7334]; + # test for case insensitivity + print [2001:0DB8:85A3:0000:0000:8A2E:0370:7334]; + # any case mixture is allowed + print [2001:0dB8:85a3:0000:0000:8A2E:0370:7334]; + # leading zeroes of a 16-bit group may be omitted + print [2001:db8:85a3:0:0:8a2e:370:7334]; + # a single occurrence of consecutive groups of zeroes may be replaced by :: + print [2001:db8:85a3::8a2e:370:7334]; + # all zeroes should work + print [0:0:0:0:0:0:0:0]; + # all zeroes condensed should work + print [::]; + # hybrid ipv6-ipv4 address should work + print [2001:db8:0:0:0:FFFF:192.168.0.5]; + # hybrid ipv6-ipv4 address with zero omission should work + print [2001:db8::FFFF:192.168.0.5]; + + print [2001:0db8:85a3:0000:0000:8a2e:0370:7334]/64; + + print 1day, 1days, 1.0day, 1.0days; + print 1hr, 1hrs, 1.0hr, 1.0hrs; + print 1min, 1mins, 1.0min, 1.0mins; + print 1sec, 1secs, 1.0sec, 1.0secs; + print 1msec, 1msecs, 1.0msec, 1.0msecs; + print 1usec, 1usecs, 1.0usec, 1.0usecs; + } diff --git a/tests/run.py b/tests/run.py deleted file mode 100644 index edebc7a..0000000 --- a/tests/run.py +++ /dev/null @@ -1,58 +0,0 @@ -# -*- coding: utf-8 -*- -""" - Pygments unit tests - ~~~~~~~~~~~~~~~~~~ - - Usage:: - - python run.py [testfile ...] - - - :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -from __future__ import print_function - -import os -import sys -import warnings - -# only find tests in this directory -if os.path.dirname(__file__): - os.chdir(os.path.dirname(__file__)) - -# make FutureWarnings (coming from Regex syntax most likely) and -# DeprecationWarnings due to non-raw strings an error -warnings.filterwarnings("error", module=r"pygments\..*", - category=FutureWarning) -warnings.filterwarnings("error", module=r".*pygments.*", - category=DeprecationWarning) - - -try: - import nose -except ImportError: - print('nose is required to run the Pygments test suite') - sys.exit(1) - -# make sure the current source is first on sys.path -sys.path.insert(0, '..') - -if '--with-coverage' not in sys.argv: - # if running with coverage, pygments should not be imported before coverage - # is started, otherwise it will count already executed lines as uncovered - try: - import pygments - except ImportError as err: - print('Cannot find Pygments to test: %s' % err) - sys.exit(1) - else: - print('Pygments %s test suite running (Python %s)...' % - (pygments.__version__, sys.version.split()[0]), - file=sys.stderr) -else: - print('Pygments test suite running (Python %s)...' 
% sys.version.split()[0], - file=sys.stderr) - -nose.main() diff --git a/tests/string_asserts.py b/tests/string_asserts.py deleted file mode 100644 index a02c52b..0000000 --- a/tests/string_asserts.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -""" - Pygments string assert utility - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -class StringTests(object): - - def assertStartsWith(self, haystack, needle, msg=None): - if msg is None: - msg = "'{0}' does not start with '{1}'".format(haystack, needle) - if not haystack.startswith(needle): - raise(AssertionError(msg)) - - def assertEndsWith(self, haystack, needle, msg=None): - if msg is None: - msg = "'{0}' does not end with '{1}'".format(haystack, needle) - if not haystack.endswith(needle): - raise(AssertionError(msg)) diff --git a/tests/support.py b/tests/support.py deleted file mode 100644 index c66ac66..0000000 --- a/tests/support.py +++ /dev/null @@ -1,17 +0,0 @@ -# coding: utf-8 -""" -Support for Pygments tests -""" - -import os - -from nose import SkipTest - - -def location(mod_name): - """ - Return the file and directory that the code for *mod_name* is in. - """ - source = mod_name.endswith("pyc") and mod_name[:-1] or mod_name - source = os.path.abspath(source) - return source, os.path.dirname(source) diff --git a/tests/test_apache_conf.py b/tests/test_apache_conf.py new file mode 100644 index 0000000..1e906c1 --- /dev/null +++ b/tests/test_apache_conf.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- +""" + Basic Apache Configuration Test + ~~~~~~~~~~~~~~~~~-------------- + + :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import textwrap + +import pytest + +from pygments.token import Text, Number, Token +from pygments.lexers import configs + + +@pytest.fixture(scope='module') +def lexer(): + yield configs.ApacheConfLexer() + + +def test_multiline_comment(lexer): + fragment = '#SecAction \\\n "id:\'900004\', \\\n phase:1, \\\n t:none, \\\n setvar:tx.anomaly_score_blocking=on, \\\n nolog, \\\n pass"\n \n' + tokens = [ + (Token.Comment, '#SecAction \\\n "id:\'900004\', \\\n phase:1, \\\n t:none, \\\n setvar:tx.anomaly_score_blocking=on, \\\n nolog, \\\n pass"'), + (Token.Text, '\n \n'), + ] + assert list(lexer.get_tokens(fragment)) == tokens + +def test_multiline_argument(lexer): + fragment = 'SecAction \\\n "id:\'900001\', \\\n phase:1, \\\n t:none, \\\n setvar:tx.critical_anomaly_score=5, \\\n setvar:tx.error_anomaly_score=4, \\\n setvar:tx.warning_anomaly_score=3, \\\n setvar:tx.notice_anomaly_score=2, \\\n nolog, \\\n pass"\n' + tokens = [ + (Token.Name.Builtin, 'SecAction'), + (Token.Text, ' '), + (Token.Text, '\\\n'), + (Token.Text, ' '), + (Token.Literal.String.Double, '"id:\'900001\', \\\n phase:1, \\\n t:none, \\\n setvar:tx.critical_anomaly_score=5, \\\n setvar:tx.error_anomaly_score=4, \\\n setvar:tx.warning_anomaly_score=3, \\\n setvar:tx.notice_anomaly_score=2, \\\n nolog, \\\n pass"'), + (Token.Text, ''), + (Token.Text, '\n'), + ] + assert list(lexer.get_tokens(fragment)) == tokens \ No newline at end of file diff --git a/tests/test_basic.py b/tests/test_basic.py index 03d10cd..e2255f5 100644 --- a/tests/test_basic.py +++ b/tests/test_basic.py @@ -3,72 +3,71 @@ Pygments Basic lexers tests ~~~~~~~~~~~~~~~~~~~~~~~~~~~ - :copyright: Copyright 2006-2016 by the Pygments team, see AUTHORS. 
+ :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ -import unittest + +import pytest from pygments.lexers.basic import VBScriptLexer from pygments.token import Error, Name, Number, Punctuation, String, Whitespace -class VBScriptLexerTest(unittest.TestCase): - - def setUp(self): - self.lexer = VBScriptLexer() - - def _assert_are_tokens_of_type(self, examples, expected_token_type): - for test_number, example in enumerate(examples.split(), 1): - token_count = 0 - for token_type, token_value in self.lexer.get_tokens(example): - if token_type != Whitespace: - token_count += 1 - self.assertEqual( - token_type, expected_token_type, - 'token_type #%d for %s is %s but must be %s' % - (test_number, token_value, token_type, expected_token_type)) - self.assertEqual( - token_count, 1, - '%s must yield exactly 1 token instead of %d' % - (example, token_count)) - - def _assert_tokens_match(self, text, expected_tokens_without_trailing_newline): - actual_tokens = tuple(self.lexer.get_tokens(text)) - if (len(actual_tokens) >= 1) and (actual_tokens[-1] == (Whitespace, '\n')): - actual_tokens = tuple(actual_tokens[:-1]) - self.assertEqual( - expected_tokens_without_trailing_newline, actual_tokens, - 'text must yield expected tokens: %s' % text) - - def test_can_lex_float(self): - self._assert_are_tokens_of_type( - '1. 1.e1 .1 1.2 1.2e3 1.2e+3 1.2e-3 1e2', Number.Float) - self._assert_tokens_match( - '1e2.1e2', - ((Number.Float, '1e2'), (Number.Float, '.1e2')) - ) - - def test_can_reject_almost_float(self): - self._assert_tokens_match( - '.e1', - ((Punctuation, '.'), (Name, 'e1'))) - - def test_can_lex_integer(self): - self._assert_are_tokens_of_type( - '1 23 456', Number.Integer) - - def test_can_lex_names(self): - self._assert_are_tokens_of_type( - u'thingy thingy123 _thingy _123', Name) - - def test_can_recover_after_unterminated_string(self): - self._assert_tokens_match( - '"x\nx', - ((String.Double, '"'), (String.Double, 'x'), (Error, '\n'), (Name, 'x')) - ) - - def test_can_recover_from_invalid_character(self): - self._assert_tokens_match( - 'a;bc\nd', - ((Name, 'a'), (Error, ';bc\n'), (Name, 'd')) - ) +@pytest.fixture(scope='module') +def lexer(): + yield VBScriptLexer() + + +def assert_are_tokens_of_type(lexer, examples, expected_token_type): + for test_number, example in enumerate(examples.split(), 1): + token_count = 0 + for token_type, token_value in lexer.get_tokens(example): + if token_type != Whitespace: + token_count += 1 + assert token_type == expected_token_type, \ + 'token_type #%d for %s is %s but must be %s' % \ + (test_number, token_value, token_type, expected_token_type) + assert token_count == 1, \ + '%s must yield exactly 1 token instead of %d' % (example, token_count) + + +def assert_tokens_match(lexer, text, expected_tokens_without_trailing_newline): + actual_tokens = tuple(lexer.get_tokens(text)) + if (len(actual_tokens) >= 1) and (actual_tokens[-1] == (Whitespace, '\n')): + actual_tokens = tuple(actual_tokens[:-1]) + assert expected_tokens_without_trailing_newline == actual_tokens, \ + 'text must yield expected tokens: %s' % text + + +def test_can_lex_float(lexer): + assert_are_tokens_of_type(lexer, + '1. 
1.e1 .1 1.2 1.2e3 1.2e+3 1.2e-3 1e2', + Number.Float) + assert_tokens_match(lexer, + '1e2.1e2', + ((Number.Float, '1e2'), (Number.Float, '.1e2'))) + + +def test_can_reject_almost_float(lexer): + assert_tokens_match(lexer, '.e1', ((Punctuation, '.'), (Name, 'e1'))) + + +def test_can_lex_integer(lexer): + assert_are_tokens_of_type(lexer, '1 23 456', Number.Integer) + + +def test_can_lex_names(lexer): + assert_are_tokens_of_type(lexer, u'thingy thingy123 _thingy _123', Name) + + +def test_can_recover_after_unterminated_string(lexer): + assert_tokens_match(lexer, + '"x\nx', + ((String.Double, '"'), (String.Double, 'x'), + (Error, '\n'), (Name, 'x'))) + + +def test_can_recover_from_invalid_character(lexer): + assert_tokens_match(lexer, + 'a;bc\nd', + ((Name, 'a'), (Error, ';bc\n'), (Name, 'd'))) diff --git a/tests/test_basic_api.py b/tests/test_basic_api.py index b1b6926..056f106 100644 --- a/tests/test_basic_api.py +++ b/tests/test_basic_api.py @@ -10,7 +10,9 @@ from __future__ import print_function import random -import unittest +from os import path + +import pytest from pygments import lexers, formatters, lex, format from pygments.token import _TokenType, Text @@ -18,116 +20,105 @@ from pygments.lexer import RegexLexer from pygments.formatters.img import FontNotFound from pygments.util import text_type, StringIO, BytesIO, xrange, ClassNotFound -import support - -TESTFILE, TESTDIR = support.location(__file__) +TESTDIR = path.dirname(path.abspath(__file__)) +TESTFILE = path.join(TESTDIR, 'test_basic_api.py') test_content = [chr(i) for i in xrange(33, 128)] * 5 random.shuffle(test_content) test_content = ''.join(test_content) + '\n' -def test_lexer_instantiate_all(): +@pytest.mark.parametrize('name', lexers.LEXERS) +def test_lexer_instantiate_all(name): # instantiate every lexer, to see if the token type defs are correct - def verify(name): - getattr(lexers, name) - for x in lexers.LEXERS: - yield verify, x + getattr(lexers, name) -def test_lexer_classes(): +@pytest.mark.parametrize('cls', lexers._iter_lexerclasses(plugins=False)) +def test_lexer_classes(cls): # test that every lexer class has the correct public API - def verify(cls): - assert type(cls.name) is str - for attr in 'aliases', 'filenames', 'alias_filenames', 'mimetypes': - assert hasattr(cls, attr) - assert type(getattr(cls, attr)) is list, \ - "%s: %s attribute wrong" % (cls, attr) - result = cls.analyse_text("abc") - assert isinstance(result, float) and 0.0 <= result <= 1.0 - result = cls.analyse_text(".abc") - assert isinstance(result, float) and 0.0 <= result <= 1.0 - - assert all(al.lower() == al for al in cls.aliases) - - inst = cls(opt1="val1", opt2="val2") - if issubclass(cls, RegexLexer): - if not hasattr(cls, '_tokens'): - # if there's no "_tokens", the lexer has to be one with - # multiple tokendef variants - assert cls.token_variants - for variant in cls.tokens: - assert 'root' in cls.tokens[variant] - else: - assert 'root' in cls._tokens, \ - '%s has no root state' % cls - - if cls.name in ['XQuery', 'Opa']: # XXX temporary - return - - try: - tokens = list(inst.get_tokens(test_content)) - except KeyboardInterrupt: - raise KeyboardInterrupt( - 'interrupted %s.get_tokens(): test_content=%r' % - (cls.__name__, test_content)) - txt = "" - for token in tokens: - assert isinstance(token, tuple) - assert isinstance(token[0], _TokenType) - assert isinstance(token[1], text_type) - txt += token[1] - assert txt == test_content, "%s lexer roundtrip failed: %r != %r" % \ - (cls.name, test_content, txt) - - for lexer in 
lexers._iter_lexerclasses(plugins=False): - yield verify, lexer - - -def test_lexer_options(): + assert type(cls.name) is str + for attr in 'aliases', 'filenames', 'alias_filenames', 'mimetypes': + assert hasattr(cls, attr) + assert type(getattr(cls, attr)) is list, \ + "%s: %s attribute wrong" % (cls, attr) + result = cls.analyse_text("abc") + assert isinstance(result, float) and 0.0 <= result <= 1.0 + result = cls.analyse_text(".abc") + assert isinstance(result, float) and 0.0 <= result <= 1.0 + + assert all(al.lower() == al for al in cls.aliases) + + inst = cls(opt1="val1", opt2="val2") + if issubclass(cls, RegexLexer): + if not hasattr(cls, '_tokens'): + # if there's no "_tokens", the lexer has to be one with + # multiple tokendef variants + assert cls.token_variants + for variant in cls.tokens: + assert 'root' in cls.tokens[variant] + else: + assert 'root' in cls._tokens, \ + '%s has no root state' % cls + + if cls.name in ['XQuery', 'Opa']: # XXX temporary + return + + try: + tokens = list(inst.get_tokens(test_content)) + except KeyboardInterrupt: + raise KeyboardInterrupt( + 'interrupted %s.get_tokens(): test_content=%r' % + (cls.__name__, test_content)) + txt = "" + for token in tokens: + assert isinstance(token, tuple) + assert isinstance(token[0], _TokenType) + assert isinstance(token[1], text_type) + txt += token[1] + assert txt == test_content, "%s lexer roundtrip failed: %r != %r" % \ + (cls.name, test_content, txt) + + +@pytest.mark.parametrize('cls', lexers._iter_lexerclasses(plugins=False)) +def test_lexer_options(cls): + if cls.__name__ == 'RawTokenLexer': + # this one is special + return + # test that the basic options work def ensure(tokens, output): concatenated = ''.join(token[1] for token in tokens) assert concatenated == output, \ - '%s: %r != %r' % (lexer, concatenated, output) - - def verify(cls): - inst = cls(stripnl=False) - ensure(inst.get_tokens('a\nb'), 'a\nb\n') - ensure(inst.get_tokens('\n\n\n'), '\n\n\n') - inst = cls(stripall=True) - ensure(inst.get_tokens(' \n b\n\n\n'), 'b\n') - # some lexers require full lines in input - if ('ConsoleLexer' not in cls.__name__ and - 'SessionLexer' not in cls.__name__ and - not cls.__name__.startswith('Literate') and - cls.__name__ not in ('ErlangShellLexer', 'RobotFrameworkLexer')): - inst = cls(ensurenl=False) - ensure(inst.get_tokens('a\nb'), 'a\nb') - inst = cls(ensurenl=False, stripall=True) - ensure(inst.get_tokens('a\nb\n\n'), 'a\nb') - - for lexer in lexers._iter_lexerclasses(plugins=False): - if lexer.__name__ == 'RawTokenLexer': - # this one is special - continue - yield verify, lexer + '%s: %r != %r' % (cls, concatenated, output) + + inst = cls(stripnl=False) + ensure(inst.get_tokens('a\nb'), 'a\nb\n') + ensure(inst.get_tokens('\n\n\n'), '\n\n\n') + inst = cls(stripall=True) + ensure(inst.get_tokens(' \n b\n\n\n'), 'b\n') + # some lexers require full lines in input + if ('ConsoleLexer' not in cls.__name__ and + 'SessionLexer' not in cls.__name__ and + not cls.__name__.startswith('Literate') and + cls.__name__ not in ('ErlangShellLexer', 'RobotFrameworkLexer')): + inst = cls(ensurenl=False) + ensure(inst.get_tokens('a\nb'), 'a\nb') + inst = cls(ensurenl=False, stripall=True) + ensure(inst.get_tokens('a\nb\n\n'), 'a\nb') def test_get_lexers(): # test that the lexers functions work - def verify(func, args): - x = func(opt='val', *args) - assert isinstance(x, lexers.PythonLexer) - assert x.options["opt"] == "val" - for func, args in [(lexers.get_lexer_by_name, ("python",)), (lexers.get_lexer_for_filename, 
("test.py",)), (lexers.get_lexer_for_mimetype, ("text/x-python",)), - (lexers.guess_lexer, ("#!/usr/bin/python -O\nprint",)), + (lexers.guess_lexer, ("#!/usr/bin/python3 -O\nprint",)), (lexers.guess_lexer_for_filename, ("a.py", "<%= @foo %>")) ]: - yield verify, func, args + x = func(opt='val', *args) + assert isinstance(x, lexers.PythonLexer) + assert x.options["opt"] == "val" for cls, (_, lname, aliases, _, mimetypes) in lexers.LEXERS.items(): assert cls == lexers.find_lexer_class(lname).__name__ @@ -146,38 +137,35 @@ def test_get_lexers(): raise Exception -def test_formatter_public_api(): +@pytest.mark.parametrize('cls', [getattr(formatters, name) + for name in formatters.FORMATTERS]) +def test_formatter_public_api(cls): # test that every formatter class has the correct public API ts = list(lexers.PythonLexer().get_tokens("def f(): pass")) string_out = StringIO() bytes_out = BytesIO() - def verify(formatter): - info = formatters.FORMATTERS[formatter.__name__] - assert len(info) == 5 - assert info[1], "missing formatter name" - assert info[2], "missing formatter aliases" - assert info[4], "missing formatter docstring" - - try: - inst = formatter(opt1="val1") - except (ImportError, FontNotFound) as e: - raise support.SkipTest(e) - - try: - inst.get_style_defs() - except NotImplementedError: - # may be raised by formatters for which it doesn't make sense - pass - - if formatter.unicodeoutput: - inst.format(ts, string_out) - else: - inst.format(ts, bytes_out) + info = formatters.FORMATTERS[cls.__name__] + assert len(info) == 5 + assert info[1], "missing formatter name" + assert info[2], "missing formatter aliases" + assert info[4], "missing formatter docstring" + + try: + inst = cls(opt1="val1") + except (ImportError, FontNotFound) as e: + pytest.skip(str(e)) + + try: + inst.get_style_defs() + except NotImplementedError: + # may be raised by formatters for which it doesn't make sense + pass - for name in formatters.FORMATTERS: - formatter = getattr(formatters, name) - yield verify, formatter + if cls.unicodeoutput: + inst.format(ts, string_out) + else: + inst.format(ts, bytes_out) def test_formatter_encodings(): @@ -201,37 +189,33 @@ def test_formatter_encodings(): assert u"ä".encode("utf8") in format(tokens, fmt) -def test_formatter_unicode_handling(): +@pytest.mark.parametrize('cls', [getattr(formatters, name) + for name in formatters.FORMATTERS]) +def test_formatter_unicode_handling(cls): # test that the formatter supports encoding and Unicode tokens = list(lexers.PythonLexer(encoding='utf-8'). get_tokens("def f(): 'ä'")) - def verify(formatter): - try: - inst = formatter(encoding=None) - except (ImportError, FontNotFound) as e: - # some dependency or font not installed - raise support.SkipTest(e) - - if formatter.name != 'Raw tokens': - out = format(tokens, inst) - if formatter.unicodeoutput: - assert type(out) is text_type, '%s: %r' % (formatter, out) - - inst = formatter(encoding='utf-8') - out = format(tokens, inst) - assert type(out) is bytes, '%s: %r' % (formatter, out) - # Cannot test for encoding, since formatters may have to escape - # non-ASCII characters. 
- else: - inst = formatter() - out = format(tokens, inst) - assert type(out) is bytes, '%s: %r' % (formatter, out) - - for formatter, info in formatters.FORMATTERS.items(): - # this tests the automatic importing as well - fmter = getattr(formatters, formatter) - yield verify, fmter + try: + inst = cls(encoding=None) + except (ImportError, FontNotFound) as e: + # some dependency or font not installed + pytest.skip(str(e)) + + if cls.name != 'Raw tokens': + out = format(tokens, inst) + if cls.unicodeoutput: + assert type(out) is text_type, '%s: %r' % (cls, out) + + inst = cls(encoding='utf-8') + out = format(tokens, inst) + assert type(out) is bytes, '%s: %r' % (cls, out) + # Cannot test for encoding, since formatters may have to escape + # non-ASCII characters. + else: + inst = cls() + out = format(tokens, inst) + assert type(out) is bytes, '%s: %r' % (cls, out) def test_get_formatters(): @@ -268,7 +252,7 @@ def test_bare_class_handler(): assert False, 'nothing raised' -class FiltersTest(unittest.TestCase): +class TestFilters(object): def test_basic(self): filters_args = [ @@ -287,19 +271,18 @@ class FiltersTest(unittest.TestCase): with open(TESTFILE, 'rb') as fp: text = fp.read().decode('utf-8') tokens = list(lx.get_tokens(text)) - self.assertTrue(all(isinstance(t[1], text_type) - for t in tokens), - '%s filter did not return Unicode' % x) + assert all(isinstance(t[1], text_type) for t in tokens), \ + '%s filter did not return Unicode' % x roundtext = ''.join([t[1] for t in tokens]) if x not in ('whitespace', 'keywordcase', 'gobble'): # these filters change the text - self.assertEqual(roundtext, text, - "lexer roundtrip with %s filter failed" % x) + assert roundtext == text, \ + "lexer roundtrip with %s filter failed" % x def test_raiseonerror(self): lx = lexers.PythonLexer() lx.add_filter('raiseonerror', excclass=RuntimeError) - self.assertRaises(RuntimeError, list, lx.get_tokens('$')) + assert pytest.raises(RuntimeError, list, lx.get_tokens('$')) def test_whitespace(self): lx = lexers.PythonLexer() @@ -307,7 +290,7 @@ class FiltersTest(unittest.TestCase): with open(TESTFILE, 'rb') as fp: text = fp.read().decode('utf-8') lxtext = ''.join([t[1] for t in list(lx.get_tokens(text))]) - self.assertFalse(' ' in lxtext) + assert ' ' not in lxtext def test_keywordcase(self): lx = lexers.PythonLexer() @@ -315,15 +298,15 @@ class FiltersTest(unittest.TestCase): with open(TESTFILE, 'rb') as fp: text = fp.read().decode('utf-8') lxtext = ''.join([t[1] for t in list(lx.get_tokens(text))]) - self.assertTrue('Def' in lxtext and 'Class' in lxtext) + assert 'Def' in lxtext and 'Class' in lxtext def test_codetag(self): lx = lexers.PythonLexer() lx.add_filter('codetagify') text = u'# BUG: text' tokens = list(lx.get_tokens(text)) - self.assertEqual('# ', tokens[0][1]) - self.assertEqual('BUG', tokens[1][1]) + assert '# ' == tokens[0][1] + assert 'BUG' == tokens[1][1] def test_codetag_boundary(self): # ticket #368 @@ -331,4 +314,4 @@ class FiltersTest(unittest.TestCase): lx.add_filter('codetagify') text = u'# DEBUG: text' tokens = list(lx.get_tokens(text)) - self.assertEqual('# DEBUG: text', tokens[0][1]) + assert '# DEBUG: text' == tokens[0][1] diff --git a/tests/test_bibtex.py b/tests/test_bibtex.py index 2f1c395..756a658 100644 --- a/tests/test_bibtex.py +++ b/tests/test_bibtex.py @@ -8,229 +8,233 @@ """ import textwrap -import unittest + +import pytest from pygments.lexers import BibTeXLexer, BSTLexer from pygments.token import Token -class BibTeXTest(unittest.TestCase): - def setUp(self): - self.lexer = 
BibTeXLexer() - - def testPreamble(self): - data = u'@PREAMBLE{"% some LaTeX code here"}' - tokens = [ - (Token.Name.Class, u'@PREAMBLE'), - (Token.Punctuation, u'{'), - (Token.String, u'"'), - (Token.String, u'% some LaTeX code here'), - (Token.String, u'"'), - (Token.Punctuation, u'}'), - (Token.Text, u'\n'), - ] - self.assertEqual(list(self.lexer.get_tokens(data)), tokens) - - def testString(self): - data = u'@STRING(SCI = "Science")' - tokens = [ - (Token.Name.Class, u'@STRING'), - (Token.Punctuation, u'('), - (Token.Name.Attribute, u'SCI'), - (Token.Text, u' '), - (Token.Punctuation, u'='), - (Token.Text, u' '), - (Token.String, u'"'), - (Token.String, u'Science'), - (Token.String, u'"'), - (Token.Punctuation, u')'), - (Token.Text, u'\n'), - ] - self.assertEqual(list(self.lexer.get_tokens(data)), tokens) - - def testEntry(self): - data = u""" - This is a comment. - - @ARTICLE{ruckenstein-diffusion, - author = "Liu, Hongquin" # and # "Ruckenstein, Eli", - year = 1997, - month = JAN, - pages = "888-895" - } - """ - - tokens = [ - (Token.Comment, u'This is a comment.'), - (Token.Text, u'\n\n'), - (Token.Name.Class, u'@ARTICLE'), - (Token.Punctuation, u'{'), - (Token.Name.Label, u'ruckenstein-diffusion'), - (Token.Punctuation, u','), - (Token.Text, u'\n '), - (Token.Name.Attribute, u'author'), - (Token.Text, u' '), - (Token.Punctuation, u'='), - (Token.Text, u' '), - (Token.String, u'"'), - (Token.String, u'Liu, Hongquin'), - (Token.String, u'"'), - (Token.Text, u' '), - (Token.Punctuation, u'#'), - (Token.Text, u' '), - (Token.Name.Variable, u'and'), - (Token.Text, u' '), - (Token.Punctuation, u'#'), - (Token.Text, u' '), - (Token.String, u'"'), - (Token.String, u'Ruckenstein, Eli'), - (Token.String, u'"'), - (Token.Punctuation, u','), - (Token.Text, u'\n '), - (Token.Name.Attribute, u'year'), - (Token.Text, u' '), - (Token.Punctuation, u'='), - (Token.Text, u' '), - (Token.Number, u'1997'), - (Token.Punctuation, u','), - (Token.Text, u'\n '), - (Token.Name.Attribute, u'month'), - (Token.Text, u' '), - (Token.Punctuation, u'='), - (Token.Text, u' '), - (Token.Name.Variable, u'JAN'), - (Token.Punctuation, u','), - (Token.Text, u'\n '), - (Token.Name.Attribute, u'pages'), - (Token.Text, u' '), - (Token.Punctuation, u'='), - (Token.Text, u' '), - (Token.String, u'"'), - (Token.String, u'888-895'), - (Token.String, u'"'), - (Token.Text, u'\n'), - (Token.Punctuation, u'}'), - (Token.Text, u'\n'), - ] - self.assertEqual(list(self.lexer.get_tokens(textwrap.dedent(data))), tokens) - - def testComment(self): - data = '@COMMENT{test}' - tokens = [ - (Token.Comment, u'@COMMENT'), - (Token.Comment, u'{test}'), - (Token.Text, u'\n'), - ] - self.assertEqual(list(self.lexer.get_tokens(data)), tokens) - - def testMissingBody(self): - data = '@ARTICLE xxx' - tokens = [ - (Token.Name.Class, u'@ARTICLE'), - (Token.Text, u' '), - (Token.Error, u'x'), - (Token.Error, u'x'), - (Token.Error, u'x'), - (Token.Text, u'\n'), - ] - self.assertEqual(list(self.lexer.get_tokens(data)), tokens) - - def testMismatchedBrace(self): - data = '@PREAMBLE(""}' - tokens = [ - (Token.Name.Class, u'@PREAMBLE'), - (Token.Punctuation, u'('), - (Token.String, u'"'), - (Token.String, u'"'), - (Token.Error, u'}'), - (Token.Text, u'\n'), - ] - self.assertEqual(list(self.lexer.get_tokens(data)), tokens) - - -class BSTTest(unittest.TestCase): - def setUp(self): - self.lexer = BSTLexer() - - def testBasicBST(self): - data = """ - % BibTeX standard bibliography style `plain' - - INTEGERS { output.state before.all } - - FUNCTION 
{sort.format.title} - { 't := - "A " #2 - "An " #3 - "The " #4 t chop.word - chop.word +@pytest.fixture(scope='module') +def lexer(): + yield BibTeXLexer() + + +def test_preamble(lexer): + data = u'@PREAMBLE{"% some LaTeX code here"}' + tokens = [ + (Token.Name.Class, u'@PREAMBLE'), + (Token.Punctuation, u'{'), + (Token.String, u'"'), + (Token.String, u'% some LaTeX code here'), + (Token.String, u'"'), + (Token.Punctuation, u'}'), + (Token.Text, u'\n'), + ] + assert list(lexer.get_tokens(data)) == tokens + + +def test_string(lexer): + data = u'@STRING(SCI = "Science")' + tokens = [ + (Token.Name.Class, u'@STRING'), + (Token.Punctuation, u'('), + (Token.Name.Attribute, u'SCI'), + (Token.Text, u' '), + (Token.Punctuation, u'='), + (Token.Text, u' '), + (Token.String, u'"'), + (Token.String, u'Science'), + (Token.String, u'"'), + (Token.Punctuation, u')'), + (Token.Text, u'\n'), + ] + assert list(lexer.get_tokens(data)) == tokens + + +def test_entry(lexer): + data = u""" + This is a comment. + + @ARTICLE{ruckenstein-diffusion, + author = "Liu, Hongquin" # and # "Ruckenstein, Eli", + year = 1997, + month = JAN, + pages = "888-895" + } + """ + + tokens = [ + (Token.Comment, u'This is a comment.'), + (Token.Text, u'\n\n'), + (Token.Name.Class, u'@ARTICLE'), + (Token.Punctuation, u'{'), + (Token.Name.Label, u'ruckenstein-diffusion'), + (Token.Punctuation, u','), + (Token.Text, u'\n '), + (Token.Name.Attribute, u'author'), + (Token.Text, u' '), + (Token.Punctuation, u'='), + (Token.Text, u' '), + (Token.String, u'"'), + (Token.String, u'Liu, Hongquin'), + (Token.String, u'"'), + (Token.Text, u' '), + (Token.Punctuation, u'#'), + (Token.Text, u' '), + (Token.Name.Variable, u'and'), + (Token.Text, u' '), + (Token.Punctuation, u'#'), + (Token.Text, u' '), + (Token.String, u'"'), + (Token.String, u'Ruckenstein, Eli'), + (Token.String, u'"'), + (Token.Punctuation, u','), + (Token.Text, u'\n '), + (Token.Name.Attribute, u'year'), + (Token.Text, u' '), + (Token.Punctuation, u'='), + (Token.Text, u' '), + (Token.Number, u'1997'), + (Token.Punctuation, u','), + (Token.Text, u'\n '), + (Token.Name.Attribute, u'month'), + (Token.Text, u' '), + (Token.Punctuation, u'='), + (Token.Text, u' '), + (Token.Name.Variable, u'JAN'), + (Token.Punctuation, u','), + (Token.Text, u'\n '), + (Token.Name.Attribute, u'pages'), + (Token.Text, u' '), + (Token.Punctuation, u'='), + (Token.Text, u' '), + (Token.String, u'"'), + (Token.String, u'888-895'), + (Token.String, u'"'), + (Token.Text, u'\n'), + (Token.Punctuation, u'}'), + (Token.Text, u'\n'), + ] + assert list(lexer.get_tokens(textwrap.dedent(data))) == tokens + + +def test_comment(lexer): + data = '@COMMENT{test}' + tokens = [ + (Token.Comment, u'@COMMENT'), + (Token.Comment, u'{test}'), + (Token.Text, u'\n'), + ] + assert list(lexer.get_tokens(data)) == tokens + + +def test_missing_body(lexer): + data = '@ARTICLE xxx' + tokens = [ + (Token.Name.Class, u'@ARTICLE'), + (Token.Text, u' '), + (Token.Error, u'x'), + (Token.Error, u'x'), + (Token.Error, u'x'), + (Token.Text, u'\n'), + ] + assert list(lexer.get_tokens(data)) == tokens + + +def test_mismatched_brace(lexer): + data = '@PREAMBLE(""}' + tokens = [ + (Token.Name.Class, u'@PREAMBLE'), + (Token.Punctuation, u'('), + (Token.String, u'"'), + (Token.String, u'"'), + (Token.Error, u'}'), + (Token.Text, u'\n'), + ] + assert list(lexer.get_tokens(data)) == tokens + + +def test_basic_bst(): + lexer = BSTLexer() + data = """ + % BibTeX standard bibliography style `plain' + + INTEGERS { output.state before.all } + + 
FUNCTION {sort.format.title} + { 't := + "A " #2 + "An " #3 + "The " #4 t chop.word chop.word - sortify - #1 global.max$ substring$ - } - - ITERATE {call.type$} - """ - tokens = [ - (Token.Comment.SingleLine, "% BibTeX standard bibliography style `plain'"), - (Token.Text, u'\n\n'), - (Token.Keyword, u'INTEGERS'), - (Token.Text, u' '), - (Token.Punctuation, u'{'), - (Token.Text, u' '), - (Token.Name.Variable, u'output.state'), - (Token.Text, u' '), - (Token.Name.Variable, u'before.all'), - (Token.Text, u' '), - (Token.Punctuation, u'}'), - (Token.Text, u'\n\n'), - (Token.Keyword, u'FUNCTION'), - (Token.Text, u' '), - (Token.Punctuation, u'{'), - (Token.Name.Variable, u'sort.format.title'), - (Token.Punctuation, u'}'), - (Token.Text, u'\n'), - (Token.Punctuation, u'{'), - (Token.Text, u' '), - (Token.Name.Function, u"'t"), - (Token.Text, u' '), - (Token.Name.Variable, u':='), - (Token.Text, u'\n'), - (Token.Literal.String, u'"A "'), - (Token.Text, u' '), - (Token.Literal.Number, u'#2'), - (Token.Text, u'\n '), - (Token.Literal.String, u'"An "'), - (Token.Text, u' '), - (Token.Literal.Number, u'#3'), - (Token.Text, u'\n '), - (Token.Literal.String, u'"The "'), - (Token.Text, u' '), - (Token.Literal.Number, u'#4'), - (Token.Text, u' '), - (Token.Name.Variable, u't'), - (Token.Text, u' '), - (Token.Name.Variable, u'chop.word'), - (Token.Text, u'\n '), - (Token.Name.Variable, u'chop.word'), - (Token.Text, u'\n'), - (Token.Name.Variable, u'chop.word'), - (Token.Text, u'\n'), - (Token.Name.Variable, u'sortify'), - (Token.Text, u'\n'), - (Token.Literal.Number, u'#1'), - (Token.Text, u' '), - (Token.Name.Builtin, u'global.max$'), - (Token.Text, u' '), - (Token.Name.Builtin, u'substring$'), - (Token.Text, u'\n'), - (Token.Punctuation, u'}'), - (Token.Text, u'\n\n'), - (Token.Keyword, u'ITERATE'), - (Token.Text, u' '), - (Token.Punctuation, u'{'), - (Token.Name.Builtin, u'call.type$'), - (Token.Punctuation, u'}'), - (Token.Text, u'\n'), - ] - self.assertEqual(list(self.lexer.get_tokens(textwrap.dedent(data))), tokens) + chop.word + sortify + #1 global.max$ substring$ + } + + ITERATE {call.type$} + """ + tokens = [ + (Token.Comment.SingleLine, "% BibTeX standard bibliography style `plain'"), + (Token.Text, u'\n\n'), + (Token.Keyword, u'INTEGERS'), + (Token.Text, u' '), + (Token.Punctuation, u'{'), + (Token.Text, u' '), + (Token.Name.Variable, u'output.state'), + (Token.Text, u' '), + (Token.Name.Variable, u'before.all'), + (Token.Text, u' '), + (Token.Punctuation, u'}'), + (Token.Text, u'\n\n'), + (Token.Keyword, u'FUNCTION'), + (Token.Text, u' '), + (Token.Punctuation, u'{'), + (Token.Name.Variable, u'sort.format.title'), + (Token.Punctuation, u'}'), + (Token.Text, u'\n'), + (Token.Punctuation, u'{'), + (Token.Text, u' '), + (Token.Name.Function, u"'t"), + (Token.Text, u' '), + (Token.Name.Variable, u':='), + (Token.Text, u'\n'), + (Token.Literal.String, u'"A "'), + (Token.Text, u' '), + (Token.Literal.Number, u'#2'), + (Token.Text, u'\n '), + (Token.Literal.String, u'"An "'), + (Token.Text, u' '), + (Token.Literal.Number, u'#3'), + (Token.Text, u'\n '), + (Token.Literal.String, u'"The "'), + (Token.Text, u' '), + (Token.Literal.Number, u'#4'), + (Token.Text, u' '), + (Token.Name.Variable, u't'), + (Token.Text, u' '), + (Token.Name.Variable, u'chop.word'), + (Token.Text, u'\n '), + (Token.Name.Variable, u'chop.word'), + (Token.Text, u'\n'), + (Token.Name.Variable, u'chop.word'), + (Token.Text, u'\n'), + (Token.Name.Variable, u'sortify'), + (Token.Text, u'\n'), + (Token.Literal.Number, u'#1'), + 
(Token.Text, u' '),
+ (Token.Name.Builtin, u'global.max$'),
+ (Token.Text, u' '),
+ (Token.Name.Builtin, u'substring$'),
+ (Token.Text, u'\n'),
+ (Token.Punctuation, u'}'),
+ (Token.Text, u'\n\n'),
+ (Token.Keyword, u'ITERATE'),
+ (Token.Text, u' '),
+ (Token.Punctuation, u'{'),
+ (Token.Name.Builtin, u'call.type$'),
+ (Token.Punctuation, u'}'),
+ (Token.Text, u'\n'),
+ ]
+ assert list(lexer.get_tokens(textwrap.dedent(data))) == tokens
diff --git a/tests/test_cfm.py b/tests/test_cfm.py
index e7147a6..e317521 100644
--- a/tests/test_cfm.py
+++ b/tests/test_cfm.py
@@ -7,40 +7,40 @@
 :license: BSD, see LICENSE for details.
 """
-import unittest
-import os
+import pytest
 from pygments.token import Token
 from pygments.lexers import ColdfusionHtmlLexer
-class ColdfusionHtmlLexerTest(unittest.TestCase):
-
- def setUp(self):
- self.lexer = ColdfusionHtmlLexer()
-
- def testBasicComment(self):
- fragment = u'<!--- cfcomment --->'
- expected = [
- (Token.Text, u''),
- (Token.Comment.Multiline, u'<!---'),
- (Token.Comment.Multiline, u' cfcomment '),
- (Token.Comment.Multiline, u'--->'),
- (Token.Text, u'\n'),
- ]
- self.assertEqual(expected, list(self.lexer.get_tokens(fragment)))
-
- def testNestedComment(self):
- fragment = u'<!--- nested <!--- cfcomment ---> --->'
- expected = [
- (Token.Text, u''),
- (Token.Comment.Multiline, u'<!---'),
- (Token.Comment.Multiline, u' nested '),
- (Token.Comment.Multiline, u'<!---'),
- (Token.Comment.Multiline, u' cfcomment '),
- (Token.Comment.Multiline, u'--->'),
- (Token.Comment.Multiline, u' '),
- (Token.Comment.Multiline, u'--->'),
- (Token.Text, u'\n'),
- ]
- self.assertEqual(expected, list(self.lexer.get_tokens(fragment)))
+@pytest.fixture(scope='module')
+def lexer():
+ yield ColdfusionHtmlLexer()
+
+
+def test_basic_comment(lexer):
+ fragment = u'<!--- cfcomment --->'
+ expected = [
+ (Token.Text, u''),
+ (Token.Comment.Multiline, u'<!---'),
+ (Token.Comment.Multiline, u' cfcomment '),
+ (Token.Comment.Multiline, u'--->'),
+ (Token.Text, u'\n'),
+ ]
+ assert list(lexer.get_tokens(fragment)) == expected
+
+
+def test_nested_comment(lexer):
+ fragment = u'<!--- nested <!--- cfcomment ---> --->'
+ expected = [
+ (Token.Text, u''),
+ (Token.Comment.Multiline, u'<!---'),
+ (Token.Comment.Multiline, u' nested '),
+ (Token.Comment.Multiline, u'<!---'),
+ (Token.Comment.Multiline, u' cfcomment '),
+ (Token.Comment.Multiline, u'--->'),
+ (Token.Comment.Multiline, u' '),
+ (Token.Comment.Multiline, u'--->'),
+ (Token.Text, u'\n'),
+ ]
+ assert list(lexer.get_tokens(fragment)) == expected
diff --git a/tests/test_clexer.py b/tests/test_clexer.py
index 64b765e..69f39b2 100644
--- a/tests/test_clexer.py
+++ b/tests/test_clexer.py
@@ -7,253 +7,259 @@
 :license: BSD, see LICENSE for details.
 """
-import unittest
-import os
 import textwrap
+import pytest
+
 from pygments.token import Text, Number, Token
 from pygments.lexers import CLexer
-class CLexerTest(unittest.TestCase):
+@pytest.fixture(scope='module')
+def lexer():
+ yield CLexer()
- def setUp(self):
- self.lexer = CLexer()
- def testNumbers(self):
- code = '42 23.42 23. .42 023 0xdeadbeef 23e+42 42e-23'
- wanted = []
- for item in zip([Number.Integer, Number.Float, Number.Float,
- Number.Float, Number.Oct, Number.Hex,
- Number.Float, Number.Float], code.split()):
- wanted.append(item)
- wanted.append((Text, ' '))
- wanted = wanted[:-1] + [(Text, '\n')]
- self.assertEqual(list(self.lexer.get_tokens(code)), wanted)
+def test_numbers(lexer):
+ code = '42 23.42 23. 
.42 023 0xdeadbeef 23e+42 42e-23' + wanted = [] + for item in zip([Number.Integer, Number.Float, Number.Float, + Number.Float, Number.Oct, Number.Hex, + Number.Float, Number.Float], code.split()): + wanted.append(item) + wanted.append((Text, ' ')) + wanted = wanted[:-1] + [(Text, '\n')] + assert list(lexer.get_tokens(code)) == wanted - def testSwitch(self): - fragment = u'''\ - int main() - { - switch (0) - { - case 0: - default: - ; - } - } - ''' - tokens = [ - (Token.Keyword.Type, u'int'), - (Token.Text, u' '), - (Token.Name.Function, u'main'), - (Token.Punctuation, u'('), - (Token.Punctuation, u')'), - (Token.Text, u'\n'), - (Token.Punctuation, u'{'), - (Token.Text, u'\n'), - (Token.Text, u' '), - (Token.Keyword, u'switch'), - (Token.Text, u' '), - (Token.Punctuation, u'('), - (Token.Literal.Number.Integer, u'0'), - (Token.Punctuation, u')'), - (Token.Text, u'\n'), - (Token.Text, u' '), - (Token.Punctuation, u'{'), - (Token.Text, u'\n'), - (Token.Text, u' '), - (Token.Keyword, u'case'), - (Token.Text, u' '), - (Token.Literal.Number.Integer, u'0'), - (Token.Operator, u':'), - (Token.Text, u'\n'), - (Token.Text, u' '), - (Token.Keyword, u'default'), - (Token.Operator, u':'), - (Token.Text, u'\n'), - (Token.Text, u' '), - (Token.Punctuation, u';'), - (Token.Text, u'\n'), - (Token.Text, u' '), - (Token.Punctuation, u'}'), - (Token.Text, u'\n'), - (Token.Punctuation, u'}'), - (Token.Text, u'\n'), - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(textwrap.dedent(fragment)))) - def testSwitchSpaceBeforeColon(self): - fragment = u'''\ - int main() +def test_switch(lexer): + fragment = u'''\ + int main() + { + switch (0) { - switch (0) - { - case 0 : - default : - ; - } + case 0: + default: + ; } - ''' - tokens = [ - (Token.Keyword.Type, u'int'), - (Token.Text, u' '), - (Token.Name.Function, u'main'), - (Token.Punctuation, u'('), - (Token.Punctuation, u')'), - (Token.Text, u'\n'), - (Token.Punctuation, u'{'), - (Token.Text, u'\n'), - (Token.Text, u' '), - (Token.Keyword, u'switch'), - (Token.Text, u' '), - (Token.Punctuation, u'('), - (Token.Literal.Number.Integer, u'0'), - (Token.Punctuation, u')'), - (Token.Text, u'\n'), - (Token.Text, u' '), - (Token.Punctuation, u'{'), - (Token.Text, u'\n'), - (Token.Text, u' '), - (Token.Keyword, u'case'), - (Token.Text, u' '), - (Token.Literal.Number.Integer, u'0'), - (Token.Text, u' '), - (Token.Operator, u':'), - (Token.Text, u'\n'), - (Token.Text, u' '), - (Token.Keyword, u'default'), - (Token.Text, u' '), - (Token.Operator, u':'), - (Token.Text, u'\n'), - (Token.Text, u' '), - (Token.Punctuation, u';'), - (Token.Text, u'\n'), - (Token.Text, u' '), - (Token.Punctuation, u'}'), - (Token.Text, u'\n'), - (Token.Punctuation, u'}'), - (Token.Text, u'\n'), - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(textwrap.dedent(fragment)))) + } + ''' + tokens = [ + (Token.Keyword.Type, u'int'), + (Token.Text, u' '), + (Token.Name.Function, u'main'), + (Token.Punctuation, u'('), + (Token.Punctuation, u')'), + (Token.Text, u'\n'), + (Token.Punctuation, u'{'), + (Token.Text, u'\n'), + (Token.Text, u' '), + (Token.Keyword, u'switch'), + (Token.Text, u' '), + (Token.Punctuation, u'('), + (Token.Literal.Number.Integer, u'0'), + (Token.Punctuation, u')'), + (Token.Text, u'\n'), + (Token.Text, u' '), + (Token.Punctuation, u'{'), + (Token.Text, u'\n'), + (Token.Text, u' '), + (Token.Keyword, u'case'), + (Token.Text, u' '), + (Token.Literal.Number.Integer, u'0'), + (Token.Operator, u':'), + (Token.Text, u'\n'), + (Token.Text, u' '), + (Token.Keyword, 
u'default'), + (Token.Operator, u':'), + (Token.Text, u'\n'), + (Token.Text, u' '), + (Token.Punctuation, u';'), + (Token.Text, u'\n'), + (Token.Text, u' '), + (Token.Punctuation, u'}'), + (Token.Text, u'\n'), + (Token.Punctuation, u'}'), + (Token.Text, u'\n'), + ] + assert list(lexer.get_tokens(textwrap.dedent(fragment))) == tokens - def testLabel(self): - fragment = u'''\ - int main() - { - foo: - goto foo; - } - ''' - tokens = [ - (Token.Keyword.Type, u'int'), - (Token.Text, u' '), - (Token.Name.Function, u'main'), - (Token.Punctuation, u'('), - (Token.Punctuation, u')'), - (Token.Text, u'\n'), - (Token.Punctuation, u'{'), - (Token.Text, u'\n'), - (Token.Name.Label, u'foo'), - (Token.Punctuation, u':'), - (Token.Text, u'\n'), - (Token.Text, u' '), - (Token.Keyword, u'goto'), - (Token.Text, u' '), - (Token.Name, u'foo'), - (Token.Punctuation, u';'), - (Token.Text, u'\n'), - (Token.Punctuation, u'}'), - (Token.Text, u'\n'), - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(textwrap.dedent(fragment)))) - def testLabelSpaceBeforeColon(self): - fragment = u'''\ - int main() +def test_switch_space_before_colon(lexer): + fragment = u'''\ + int main() + { + switch (0) { - foo : - goto foo; + case 0 : + default : + ; } - ''' - tokens = [ - (Token.Keyword.Type, u'int'), - (Token.Text, u' '), - (Token.Name.Function, u'main'), - (Token.Punctuation, u'('), - (Token.Punctuation, u')'), - (Token.Text, u'\n'), - (Token.Punctuation, u'{'), - (Token.Text, u'\n'), - (Token.Name.Label, u'foo'), - (Token.Text, u' '), - (Token.Punctuation, u':'), - (Token.Text, u'\n'), - (Token.Text, u' '), - (Token.Keyword, u'goto'), - (Token.Text, u' '), - (Token.Name, u'foo'), - (Token.Punctuation, u';'), - (Token.Text, u'\n'), - (Token.Punctuation, u'}'), - (Token.Text, u'\n'), - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(textwrap.dedent(fragment)))) + } + ''' + tokens = [ + (Token.Keyword.Type, u'int'), + (Token.Text, u' '), + (Token.Name.Function, u'main'), + (Token.Punctuation, u'('), + (Token.Punctuation, u')'), + (Token.Text, u'\n'), + (Token.Punctuation, u'{'), + (Token.Text, u'\n'), + (Token.Text, u' '), + (Token.Keyword, u'switch'), + (Token.Text, u' '), + (Token.Punctuation, u'('), + (Token.Literal.Number.Integer, u'0'), + (Token.Punctuation, u')'), + (Token.Text, u'\n'), + (Token.Text, u' '), + (Token.Punctuation, u'{'), + (Token.Text, u'\n'), + (Token.Text, u' '), + (Token.Keyword, u'case'), + (Token.Text, u' '), + (Token.Literal.Number.Integer, u'0'), + (Token.Text, u' '), + (Token.Operator, u':'), + (Token.Text, u'\n'), + (Token.Text, u' '), + (Token.Keyword, u'default'), + (Token.Text, u' '), + (Token.Operator, u':'), + (Token.Text, u'\n'), + (Token.Text, u' '), + (Token.Punctuation, u';'), + (Token.Text, u'\n'), + (Token.Text, u' '), + (Token.Punctuation, u'}'), + (Token.Text, u'\n'), + (Token.Punctuation, u'}'), + (Token.Text, u'\n'), + ] + assert list(lexer.get_tokens(textwrap.dedent(fragment))) == tokens + + +def test_label(lexer): + fragment = u'''\ + int main() + { + foo: + goto foo; + } + ''' + tokens = [ + (Token.Keyword.Type, u'int'), + (Token.Text, u' '), + (Token.Name.Function, u'main'), + (Token.Punctuation, u'('), + (Token.Punctuation, u')'), + (Token.Text, u'\n'), + (Token.Punctuation, u'{'), + (Token.Text, u'\n'), + (Token.Name.Label, u'foo'), + (Token.Punctuation, u':'), + (Token.Text, u'\n'), + (Token.Text, u' '), + (Token.Keyword, u'goto'), + (Token.Text, u' '), + (Token.Name, u'foo'), + (Token.Punctuation, u';'), + (Token.Text, u'\n'), + (Token.Punctuation, u'}'), + 
(Token.Text, u'\n'),
+ ]
+ assert list(lexer.get_tokens(textwrap.dedent(fragment))) == tokens
+
+
+def test_label_space_before_colon(lexer):
+ fragment = u'''\
+ int main()
+ {
+ foo :
+ goto foo;
+ }
+ '''
+ tokens = [
+ (Token.Keyword.Type, u'int'),
+ (Token.Text, u' '),
+ (Token.Name.Function, u'main'),
+ (Token.Punctuation, u'('),
+ (Token.Punctuation, u')'),
+ (Token.Text, u'\n'),
+ (Token.Punctuation, u'{'),
+ (Token.Text, u'\n'),
+ (Token.Name.Label, u'foo'),
+ (Token.Text, u' '),
+ (Token.Punctuation, u':'),
+ (Token.Text, u'\n'),
+ (Token.Text, u' '),
+ (Token.Keyword, u'goto'),
+ (Token.Text, u' '),
+ (Token.Name, u'foo'),
+ (Token.Punctuation, u';'),
+ (Token.Text, u'\n'),
+ (Token.Punctuation, u'}'),
+ (Token.Text, u'\n'),
+ ]
+ assert list(lexer.get_tokens(textwrap.dedent(fragment))) == tokens
+
+
+def test_label_followed_by_statement(lexer):
+ fragment = u'''\
+ int main()
+ {
+ foo:return 0;
+ goto foo;
+ }
+ '''
+ tokens = [
+ (Token.Keyword.Type, u'int'),
+ (Token.Text, u' '),
+ (Token.Name.Function, u'main'),
+ (Token.Punctuation, u'('),
+ (Token.Punctuation, u')'),
+ (Token.Text, u'\n'),
+ (Token.Punctuation, u'{'),
+ (Token.Text, u'\n'),
+ (Token.Name.Label, u'foo'),
+ (Token.Punctuation, u':'),
+ (Token.Keyword, u'return'),
+ (Token.Text, u' '),
+ (Token.Literal.Number.Integer, u'0'),
+ (Token.Punctuation, u';'),
+ (Token.Text, u'\n'),
+ (Token.Text, u' '),
+ (Token.Keyword, u'goto'),
+ (Token.Text, u' '),
+ (Token.Name, u'foo'),
+ (Token.Punctuation, u';'),
+ (Token.Text, u'\n'),
+ (Token.Punctuation, u'}'),
+ (Token.Text, u'\n'),
+ ]
+ assert list(lexer.get_tokens(textwrap.dedent(fragment))) == tokens

- def testLabelFollowedByStatement(self):
- fragment = u'''\
- int main()
- {
- foo:return 0;
- goto foo;
- }
- '''
- tokens = [
- (Token.Keyword.Type, u'int'),
- (Token.Text, u' '),
- (Token.Name.Function, u'main'),
- (Token.Punctuation, u'('),
- (Token.Punctuation, u')'),
- (Token.Text, u'\n'),
- (Token.Punctuation, u'{'),
- (Token.Text, u'\n'),
- (Token.Name.Label, u'foo'),
- (Token.Punctuation, u':'),
- (Token.Keyword, u'return'),
- (Token.Text, u' '),
- (Token.Literal.Number.Integer, u'0'),
- (Token.Punctuation, u';'),
- (Token.Text, u'\n'),
- (Token.Text, u' '),
- (Token.Keyword, u'goto'),
- (Token.Text, u' '),
- (Token.Name, u'foo'),
- (Token.Punctuation, u';'),
- (Token.Text, u'\n'),
- (Token.Punctuation, u'}'),
- (Token.Text, u'\n'),
- ]
- self.assertEqual(tokens, list(self.lexer.get_tokens(textwrap.dedent(fragment))))

- def testPreprocFile(self):
- fragment = u'#include <foo>\n'
- tokens = [
- (Token.Comment.Preproc, u'#'),
- (Token.Comment.Preproc, u'include'),
- (Token.Text, u' '),
- (Token.Comment.PreprocFile, u'<foo>'),
- (Token.Comment.Preproc, u'\n'),
- ]
- self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+def test_preproc_file(lexer):
+ fragment = u'#include <foo>\n'
+ tokens = [
+ (Token.Comment.Preproc, u'#'),
+ (Token.Comment.Preproc, u'include'),
+ (Token.Text, u' '),
+ (Token.Comment.PreprocFile, u'<foo>'),
+ (Token.Comment.Preproc, u'\n'),
+ ]
+ assert list(lexer.get_tokens(fragment)) == tokens

- def testPreprocFile2(self):
- fragment = u'#include "foo.h"\n'
- tokens = [
- (Token.Comment.Preproc, u'#'),
- (Token.Comment.Preproc, 
u'include'),
- (Token.Text, u' '),
- (Token.Comment.PreprocFile, u'"foo.h"'),
- (Token.Comment.Preproc, u'\n'),
- ]
- self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+def test_preproc_file2(lexer):
+ fragment = u'#include "foo.h"\n'
+ tokens = [
+ (Token.Comment.Preproc, u'#'),
+ (Token.Comment.Preproc, u'include'),
+ (Token.Text, u' '),
+ (Token.Comment.PreprocFile, u'"foo.h"'),
+ (Token.Comment.Preproc, u'\n'),
+ ]
+ assert list(lexer.get_tokens(fragment)) == tokens
diff --git a/tests/test_cmdline.py b/tests/test_cmdline.py
index 169d690..d56e2ae 100644
--- a/tests/test_cmdline.py
+++ b/tests/test_cmdline.py
@@ -14,14 +14,16 @@ import os
 import re
 import sys
 import tempfile
-import unittest
+from os import path
+
+from pytest import raises
-import support
 from pygments import cmdline, highlight
 from pygments.util import BytesIO, StringIO
+TESTDIR = path.dirname(path.abspath(__file__))
+TESTFILE = path.join(TESTDIR, 'test_cmdline.py')
-TESTFILE, TESTDIR = support.location(__file__)
 TESTCODE = '''\
 def func(args):
 pass
@@ -65,249 +67,253 @@ def run_cmdline(*args, **kwds):
 return (ret, _decode_output(out), _decode_output(err))
-class CmdLineTest(unittest.TestCase):
-
- def check_success(self, *cmdline, **kwds):
- code, out, err = run_cmdline(*cmdline, **kwds)
- self.assertEqual(code, 0)
- self.assertEqual(err, '')
- return out
-
- def check_failure(self, *cmdline, **kwds):
- expected_code = kwds.pop('code', 1)
- code, out, err = run_cmdline(*cmdline, **kwds)
- self.assertEqual(code, expected_code)
- self.assertEqual(out, '')
- return err
-
- def test_normal(self):
- # test that cmdline gives the same output as library api
- from pygments.lexers import PythonLexer
- from pygments.formatters import HtmlFormatter
- filename = TESTFILE
- with open(filename, 'rb') as fp:
- code = fp.read()
-
- output = highlight(code, PythonLexer(), HtmlFormatter())
-
- o = self.check_success('-lpython', '-fhtml', filename)
- self.assertEqual(o, output)
-
- def test_stdin(self):
- o = self.check_success('-lpython', '-fhtml', stdin=TESTCODE)
- o = re.sub('<[^>]*>', '', o)
- # rstrip is necessary since HTML inserts a \n after the last </div>
- self.assertEqual(o.rstrip(), TESTCODE.rstrip())
-
- # guess if no lexer given
- o = self.check_success('-fhtml', stdin=TESTCODE)
- o = re.sub('<[^>]*>', '', o)
- # rstrip is necessary since HTML inserts a \n after the last </div>
- self.assertEqual(o.rstrip(), TESTCODE.rstrip())
-
- def test_outfile(self):
- # test that output file works with and without encoding
- fd, name = tempfile.mkstemp()
- os.close(fd)
- for opts in [['-fhtml', '-o', name, TESTFILE],
- ['-flatex', '-o', name, TESTFILE],
- ['-fhtml', '-o', name, '-O', 'encoding=utf-8', TESTFILE]]:
- try:
- self.check_success(*opts)
- finally:
- os.unlink(name)
-
- def test_load_from_file(self):
- lexer_file = os.path.join(TESTDIR, 'support', 'python_lexer.py')
- formatter_file = os.path.join(TESTDIR, 'support', 'html_formatter.py')
-
- # By default, use CustomLexer
- o = self.check_success('-l', lexer_file, '-f', 'html',
- '-x', stdin=TESTCODE)
- o = re.sub('<[^>]*>', '', o)
- # rstrip is necessary since HTML inserts a \n after the last </div>
- self.assertEqual(o.rstrip(), TESTCODE.rstrip())
-
- # If user specifies a name, use it
- o = self.check_success('-f', 'html', '-x', '-l',
- lexer_file + ':LexerWrapper', stdin=TESTCODE)
- o = re.sub('<[^>]*>', '', o)
- # rstrip is necessary since HTML inserts a \n after the last </div>
- self.assertEqual(o.rstrip(), TESTCODE.rstrip())
-
- # Should also work for formatters
- o = self.check_success('-lpython', '-f',
- formatter_file + ':HtmlFormatterWrapper',
- '-x', stdin=TESTCODE)
- o = re.sub('<[^>]*>', '', o)
- # rstrip is necessary since HTML inserts a \n after the last </div>
- self.assertEqual(o.rstrip(), TESTCODE.rstrip())
-
- def test_stream_opt(self):
- o = self.check_success('-lpython', '-s', 
'-fterminal', stdin=TESTCODE) - o = re.sub(r'\x1b\[.*?m', '', o) - self.assertEqual(o.replace('\r\n', '\n'), TESTCODE) - - def test_h_opt(self): - o = self.check_success('-h') - self.assertTrue('Usage:' in o) - - def test_L_opt(self): - o = self.check_success('-L') - self.assertTrue('Lexers' in o and 'Formatters' in o and - 'Filters' in o and 'Styles' in o) - o = self.check_success('-L', 'lexer') - self.assertTrue('Lexers' in o and 'Formatters' not in o) - self.check_success('-L', 'lexers') - - def test_O_opt(self): - filename = TESTFILE - o = self.check_success('-Ofull=1,linenos=true,foo=bar', - '-fhtml', filename) - self.assertTrue('foo, bar=baz=,' in o) - - def test_F_opt(self): - filename = TESTFILE - o = self.check_success('-Fhighlight:tokentype=Name.Blubb,' - 'names=TESTFILE filename', - '-fhtml', filename) - self.assertTrue('foo, bar=baz=,' in o + + +def test_F_opt(): + filename = TESTFILE + o = check_success('-Fhighlight:tokentype=Name.Blubb,' + 'names=TESTFILE filename', '-fhtml', filename) + assert ' + #include + + int main(void); + + int main(void) { + uint8_t x = 42; + uint8_t y = x + 1; + + /* exit 1 for success! */ + return 1; + } + ''' + lexer = guess_lexer(code) + assert isinstance(lexer, CLexer) \ No newline at end of file diff --git a/tests/test_crystal.py b/tests/test_crystal.py index 9a1588f..f4909ac 100644 --- a/tests/test_crystal.py +++ b/tests/test_crystal.py @@ -1,308 +1,316 @@ # -*- coding: utf-8 -*- """ Basic CrystalLexer Test - ~~~~~~~~~~~~~~~~~~~~ + ~~~~~~~~~~~~~~~~~~~~~~~ - :copyright: Copyright 2006-2016 by the Pygments team, see AUTHORS. + :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from __future__ import unicode_literals -import unittest -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ - Number, Punctuation, Error +import pytest + +from pygments.token import Text, Operator, Keyword, Name, String, Number, \ + Punctuation, Error from pygments.lexers import CrystalLexer -class CrystalTest(unittest.TestCase): +@pytest.fixture(scope='module') +def lexer(): + yield CrystalLexer() + + +def test_range_syntax1(lexer): + fragment = '1...3\n' + tokens = [ + (Number.Integer, '1'), + (Operator, '...'), + (Number.Integer, '3'), + (Text, '\n'), + ] + assert list(lexer.get_tokens(fragment)) == tokens + + +def test_range_syntax2(lexer): + fragment = '1 .. 
3\n' + tokens = [ + (Number.Integer, '1'), + (Text, ' '), + (Operator, '..'), + (Text, ' '), + (Number.Integer, '3'), + (Text, '\n'), + ] + assert list(lexer.get_tokens(fragment)) == tokens + + +def test_interpolation_nested_curly(lexer): + fragment = ( + '"A#{ (3..5).group_by { |x| x/2}.map ' + 'do |k,v| "#{k}" end.join }" + "Z"\n') + tokens = [ + (String.Double, '"'), + (String.Double, 'A'), + (String.Interpol, '#{'), + (Text, ' '), + (Punctuation, '('), + (Number.Integer, '3'), + (Operator, '..'), + (Number.Integer, '5'), + (Punctuation, ')'), + (Operator, '.'), + (Name, 'group_by'), + (Text, ' '), + (String.Interpol, '{'), + (Text, ' '), + (Operator, '|'), + (Name, 'x'), + (Operator, '|'), + (Text, ' '), + (Name, 'x'), + (Operator, '/'), + (Number.Integer, '2'), + (String.Interpol, '}'), + (Operator, '.'), + (Name, 'map'), + (Text, ' '), + (Keyword, 'do'), + (Text, ' '), + (Operator, '|'), + (Name, 'k'), + (Punctuation, ','), + (Name, 'v'), + (Operator, '|'), + (Text, ' '), + (String.Double, '"'), + (String.Interpol, '#{'), + (Name, 'k'), + (String.Interpol, '}'), + (String.Double, '"'), + (Text, ' '), + (Keyword, 'end'), + (Operator, '.'), + (Name, 'join'), + (Text, ' '), + (String.Interpol, '}'), + (String.Double, '"'), + (Text, ' '), + (Operator, '+'), + (Text, ' '), + (String.Double, '"'), + (String.Double, 'Z'), + (String.Double, '"'), + (Text, '\n'), + ] + assert list(lexer.get_tokens(fragment)) == tokens + + +def test_operator_methods(lexer): + fragment = '([] of Int32).[]?(5)\n' + tokens = [ + (Punctuation, '('), + (Operator, '['), + (Operator, ']'), + (Text, ' '), + (Keyword, 'of'), + (Text, ' '), + (Name.Builtin, 'Int32'), + (Punctuation, ')'), + (Operator, '.'), + (Name.Operator, '[]?'), + (Punctuation, '('), + (Number.Integer, '5'), + (Punctuation, ')'), + (Text, '\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens + + +def test_array_access(lexer): + fragment = '[5][5]?\n' + tokens = [ + (Operator, '['), + (Number.Integer, '5'), + (Operator, ']'), + (Operator, '['), + (Number.Integer, '5'), + (Operator, ']?'), + (Text, '\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens - def setUp(self): - self.lexer = CrystalLexer() - self.maxDiff = None - def testRangeSyntax1(self): - fragment = '1...3\n' - tokens = [ - (Number.Integer, '1'), - (Operator, '...'), - (Number.Integer, '3'), - (Text, '\n'), - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) +def test_numbers(lexer): + for kind, testset in [ + (Number.Integer, '0 1 1_000_000 1u8 11231231231121312i64'), + (Number.Float, '0.0 1.0_f32 1_f32 0f64 1e+4 1e111 1_234.567_890'), + (Number.Bin, '0b1001_0110 0b0u8'), + (Number.Oct, '0o17 0o7_i32'), + (Number.Hex, '0xdeadBEEF'), + ]: + for fragment in testset.split(): + assert list(lexer.get_tokens(fragment + '\n')) == \ + [(kind, fragment), (Text, '\n')] - def testRangeSyntax2(self): - fragment = '1 .. 
3\n' - tokens = [ - (Number.Integer, '1'), - (Text, ' '), - (Operator, '..'), - (Text, ' '), - (Number.Integer, '3'), - (Text, '\n'), - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + for fragment in '01 0b2 0x129g2 0o12358'.split(): + assert next(lexer.get_tokens(fragment + '\n'))[0] == Error - def testInterpolationNestedCurly(self): - fragment = ( - '"A#{ (3..5).group_by { |x| x/2}.map ' - 'do |k,v| "#{k}" end.join }" + "Z"\n') - tokens = [ - (String.Double, '"'), - (String.Double, 'A'), - (String.Interpol, '#{'), - (Text, ' '), - (Punctuation, '('), - (Number.Integer, '3'), - (Operator, '..'), - (Number.Integer, '5'), - (Punctuation, ')'), - (Operator, '.'), - (Name, 'group_by'), - (Text, ' '), - (String.Interpol, '{'), - (Text, ' '), - (Operator, '|'), - (Name, 'x'), - (Operator, '|'), - (Text, ' '), - (Name, 'x'), - (Operator, '/'), - (Number.Integer, '2'), - (String.Interpol, '}'), - (Operator, '.'), - (Name, 'map'), - (Text, ' '), - (Keyword, 'do'), - (Text, ' '), - (Operator, '|'), - (Name, 'k'), - (Punctuation, ','), - (Name, 'v'), - (Operator, '|'), - (Text, ' '), - (String.Double, '"'), - (String.Interpol, '#{'), - (Name, 'k'), - (String.Interpol, '}'), - (String.Double, '"'), - (Text, ' '), - (Keyword, 'end'), - (Operator, '.'), - (Name, 'join'), - (Text, ' '), - (String.Interpol, '}'), - (String.Double, '"'), - (Text, ' '), - (Operator, '+'), - (Text, ' '), - (String.Double, '"'), - (String.Double, 'Z'), - (String.Double, '"'), - (Text, '\n'), - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - def testOperatorMethods(self): - fragment = '([] of Int32).[]?(5)\n' - tokens = [ - (Punctuation, '('), - (Operator, '['), - (Operator, ']'), - (Text, ' '), - (Keyword, 'of'), - (Text, ' '), - (Name.Builtin, 'Int32'), - (Punctuation, ')'), - (Operator, '.'), - (Name.Operator, '[]?'), - (Punctuation, '('), - (Number.Integer, '5'), - (Punctuation, ')'), - (Text, '\n') - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - - def testArrayAccess(self): - fragment = '[5][5]?\n' - tokens = [ - (Operator, '['), - (Number.Integer, '5'), - (Operator, ']'), - (Operator, '['), - (Number.Integer, '5'), - (Operator, ']?'), - (Text, '\n') - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) +def test_chars(lexer): + for fragment in ["'a'", "'я'", "'\\u{1234}'", "'\n'"]: + assert list(lexer.get_tokens(fragment + '\n')) == \ + [(String.Char, fragment), (Text, '\n')] + assert next(lexer.get_tokens("'abc'"))[0] == Error - def testNumbers(self): - for kind, testset in [ - (Number.Integer, '0 1 1_000_000 1u8 11231231231121312i64'), - (Number.Float, '0.0 1.0_f32 1_f32 0f64 1e+4 1e111 1_234.567_890'), - (Number.Bin, '0b1001_0110 0b0u8'), - (Number.Oct, '0o17 0o7_i32'), - (Number.Hex, '0xdeadBEEF'), - ]: - for fragment in testset.split(): - self.assertEqual([(kind, fragment), (Text, '\n')], - list(self.lexer.get_tokens(fragment + '\n'))) - for fragment in '01 0b2 0x129g2 0o12358'.split(): - self.assertEqual(next(self.lexer.get_tokens(fragment + '\n'))[0], - Error) +def test_macro(lexer): + fragment = ( + 'def<=>(other : self) : Int\n' + '{%for field in %w(first_name middle_name last_name)%}\n' + 'cmp={{field.id}}<=>other.{{field.id}}\n' + 'return cmp if cmp!=0\n' + '{%end%}\n' + '0\n' + 'end\n') + tokens = [ + (Keyword, 'def'), + (Name.Function, '<=>'), + (Punctuation, '('), + (Name, 'other'), + (Text, ' '), + (Punctuation, ':'), + (Text, ' '), + (Keyword.Pseudo, 'self'), + (Punctuation, ')'), + (Text, ' '), + (Punctuation, ':'), + 
(Text, ' '), + (Name.Builtin, 'Int'), + (Text, '\n'), + (String.Interpol, '{%'), + (Keyword, 'for'), + (Text, ' '), + (Name, 'field'), + (Text, ' '), + (Keyword, 'in'), + (Text, ' '), + (String.Other, '%w('), + (String.Other, 'first_name middle_name last_name'), + (String.Other, ')'), + (String.Interpol, '%}'), + (Text, '\n'), + (Name, 'cmp'), + (Operator, '='), + (String.Interpol, '{{'), + (Name, 'field'), + (Operator, '.'), + (Name, 'id'), + (String.Interpol, '}}'), + (Operator, '<=>'), + (Name, 'other'), + (Operator, '.'), + (String.Interpol, '{{'), + (Name, 'field'), + (Operator, '.'), + (Name, 'id'), + (String.Interpol, '}}'), + (Text, '\n'), + (Keyword, 'return'), + (Text, ' '), + (Name, 'cmp'), + (Text, ' '), + (Keyword, 'if'), + (Text, ' '), + (Name, 'cmp'), + (Operator, '!='), + (Number.Integer, '0'), + (Text, '\n'), + (String.Interpol, '{%'), + (Keyword, 'end'), + (String.Interpol, '%}'), + (Text, '\n'), + (Number.Integer, '0'), + (Text, '\n'), + (Keyword, 'end'), + (Text, '\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens - def testChars(self): - for fragment in ["'a'", "'я'", "'\\u{1234}'", "'\n'"]: - self.assertEqual([(String.Char, fragment), (Text, '\n')], - list(self.lexer.get_tokens(fragment + '\n'))) - self.assertEqual(next(self.lexer.get_tokens("'abc'"))[0], Error) - def testMacro(self): - fragment = ( - 'def<=>(other : self) : Int\n' - '{%for field in %w(first_name middle_name last_name)%}\n' - 'cmp={{field.id}}<=>other.{{field.id}}\n' - 'return cmp if cmp!=0\n' - '{%end%}\n' - '0\n' - 'end\n') - tokens = [ - (Keyword, 'def'), - (Name.Function, '<=>'), - (Punctuation, '('), - (Name, 'other'), - (Text, ' '), - (Punctuation, ':'), - (Text, ' '), - (Keyword.Pseudo, 'self'), - (Punctuation, ')'), - (Text, ' '), - (Punctuation, ':'), - (Text, ' '), - (Name.Builtin, 'Int'), - (Text, '\n'), - (String.Interpol, '{%'), - (Keyword, 'for'), - (Text, ' '), - (Name, 'field'), - (Text, ' '), - (Keyword, 'in'), - (Text, ' '), - (String.Other, '%w('), - (String.Other, 'first_name middle_name last_name'), - (String.Other, ')'), - (String.Interpol, '%}'), - (Text, '\n'), - (Name, 'cmp'), - (Operator, '='), - (String.Interpol, '{{'), - (Name, 'field'), - (Operator, '.'), - (Name, 'id'), - (String.Interpol, '}}'), - (Operator, '<=>'), - (Name, 'other'), - (Operator, '.'), - (String.Interpol, '{{'), - (Name, 'field'), - (Operator, '.'), - (Name, 'id'), - (String.Interpol, '}}'), - (Text, '\n'), - (Keyword, 'return'), - (Text, ' '), - (Name, 'cmp'), - (Text, ' '), - (Keyword, 'if'), - (Text, ' '), - (Name, 'cmp'), - (Operator, '!='), - (Number.Integer, '0'), - (Text, '\n'), - (String.Interpol, '{%'), - (Keyword, 'end'), - (String.Interpol, '%}'), - (Text, '\n'), - (Number.Integer, '0'), - (Text, '\n'), - (Keyword, 'end'), - (Text, '\n') - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) +def test_lib(lexer): + fragment = ( + '@[Link("some")]\nlib LibSome\n' + '@[CallConvention("X86_StdCall")]\nfun foo="some.foo"(thing : Void*) : LibC::Int\n' + 'end\n') + tokens = [ + (Operator, '@['), + (Name.Decorator, 'Link'), + (Punctuation, '('), + (String.Double, '"'), + (String.Double, 'some'), + (String.Double, '"'), + (Punctuation, ')'), + (Operator, ']'), + (Text, '\n'), + (Keyword, 'lib'), + (Text, ' '), + (Name.Namespace, 'LibSome'), + (Text, '\n'), + (Operator, '@['), + (Name.Decorator, 'CallConvention'), + (Punctuation, '('), + (String.Double, '"'), + (String.Double, 'X86_StdCall'), + (String.Double, '"'), + (Punctuation, ')'), + (Operator, ']'), + (Text, '\n'), + 
(Keyword, 'fun'), + (Text, ' '), + (Name.Function, 'foo'), + (Operator, '='), + (String.Double, '"'), + (String.Double, 'some.foo'), + (String.Double, '"'), + (Punctuation, '('), + (Name, 'thing'), + (Text, ' '), + (Punctuation, ':'), + (Text, ' '), + (Name.Builtin, 'Void'), + (Operator, '*'), + (Punctuation, ')'), + (Text, ' '), + (Punctuation, ':'), + (Text, ' '), + (Name, 'LibC'), + (Operator, '::'), + (Name.Builtin, 'Int'), + (Text, '\n'), + (Keyword, 'end'), + (Text, '\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens - def testLib(self): - fragment = ( - '@[Link("some")]\nlib LibSome\n' - '@[CallConvention("X86_StdCall")]\nfun foo="some.foo"(thing : Void*) : LibC::Int\n' - 'end\n') - tokens = [ - (Operator, '@['), - (Name.Decorator, 'Link'), - (Punctuation, '('), - (String.Double, '"'), - (String.Double, 'some'), - (String.Double, '"'), - (Punctuation, ')'), - (Operator, ']'), - (Text, '\n'), - (Keyword, 'lib'), - (Text, ' '), - (Name.Namespace, 'LibSome'), - (Text, '\n'), - (Operator, '@['), - (Name.Decorator, 'CallConvention'), - (Punctuation, '('), - (String.Double, '"'), - (String.Double, 'X86_StdCall'), - (String.Double, '"'), - (Punctuation, ')'), - (Operator, ']'), - (Text, '\n'), - (Keyword, 'fun'), - (Text, ' '), - (Name.Function, 'foo'), - (Operator, '='), - (String.Double, '"'), - (String.Double, 'some.foo'), - (String.Double, '"'), - (Punctuation, '('), - (Name, 'thing'), - (Text, ' '), - (Punctuation, ':'), - (Text, ' '), - (Name.Builtin, 'Void'), - (Operator, '*'), - (Punctuation, ')'), - (Text, ' '), - (Punctuation, ':'), - (Text, ' '), - (Name, 'LibC'), - (Operator, '::'), - (Name.Builtin, 'Int'), - (Text, '\n'), - (Keyword, 'end'), - (Text, '\n') - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - def testEscapedBracestring(self): - fragment = 'str.gsub(%r{\\\\\\\\}, "/")\n' - tokens = [ - (Name, 'str'), - (Operator, '.'), - (Name, 'gsub'), - (Punctuation, '('), - (String.Regex, '%r{'), - (String.Regex, '\\\\'), - (String.Regex, '\\\\'), - (String.Regex, '}'), - (Punctuation, ','), - (Text, ' '), - (String.Double, '"'), - (String.Double, '/'), - (String.Double, '"'), - (Punctuation, ')'), - (Text, '\n'), - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) +def test_escaped_bracestring(lexer): + fragment = 'str.gsub(%r{\\\\\\\\}, "/")\n' + tokens = [ + (Name, 'str'), + (Operator, '.'), + (Name, 'gsub'), + (Punctuation, '('), + (String.Regex, '%r{'), + (String.Regex, '\\\\'), + (String.Regex, '\\\\'), + (String.Regex, '}'), + (Punctuation, ','), + (Text, ' '), + (String.Double, '"'), + (String.Double, '/'), + (String.Double, '"'), + (Punctuation, ')'), + (Text, '\n'), + ] + assert list(lexer.get_tokens(fragment)) == tokens diff --git a/tests/test_csound.py b/tests/test_csound.py index 8a25391..7259cf5 100644 --- a/tests/test_csound.py +++ b/tests/test_csound.py @@ -1,491 +1,524 @@ # -*- coding: utf-8 -*- """ Csound lexer tests - ~~~~~~~~~~~~~~~~~~~~ + ~~~~~~~~~~~~~~~~~~ :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. 
""" -import unittest from textwrap import dedent -from pygments.token import Comment, Error, Keyword, Name, Number, Operator, Punctuation, \ - String, Text +import pytest + +from pygments.token import Comment, Error, Keyword, Name, Number, Operator, \ + Punctuation, String, Text from pygments.lexers import CsoundOrchestraLexer -class CsoundOrchestraTest(unittest.TestCase): +@pytest.fixture(scope='module') +def lexer(): + yield CsoundOrchestraLexer() + + +def test_comments(lexer): + fragment = dedent('''\ + /* + * comment + */ + ; comment + // comment + ''') + tokens = [ + (Comment.Multiline, u'/*\n * comment\n */'), + (Text, u'\n'), + (Comment.Single, u'; comment'), + (Text, u'\n'), + (Comment.Single, u'// comment'), + (Text, u'\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens + + +def test_instrument_blocks(lexer): + fragment = dedent('''\ + instr/**/1,/**/N_a_M_e_,/**/+Name/**/// + iDuration = p3 + outc:a(aSignal) + endin + ''') + tokens = [ + (Keyword.Declaration, u'instr'), + (Comment.Multiline, u'/**/'), + (Name.Function, u'1'), + (Punctuation, u','), + (Comment.Multiline, u'/**/'), + (Name.Function, u'N_a_M_e_'), + (Punctuation, u','), + (Comment.Multiline, u'/**/'), + (Punctuation, u'+'), + (Name.Function, u'Name'), + (Comment.Multiline, u'/**/'), + (Comment.Single, u'//'), + (Text, u'\n'), + (Text, u' '), + (Keyword.Type, u'i'), + (Name, u'Duration'), + (Text, u' '), + (Operator, u'='), + (Text, u' '), + (Name.Variable.Instance, u'p3'), + (Text, u'\n'), + (Text, u' '), + (Name.Builtin, u'outc'), + (Punctuation, u':'), + (Keyword.Type, u'a'), + (Punctuation, u'('), + (Keyword.Type, u'a'), + (Name, u'Signal'), + (Punctuation, u')'), + (Text, u'\n'), + (Keyword.Declaration, u'endin'), + (Text, u'\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens - def setUp(self): - self.lexer = CsoundOrchestraLexer() - self.maxDiff = None - def testComments(self): - fragment = dedent('''\ - /* - * comment - */ - ; comment - // comment - ''') +def test_user_defined_opcodes(lexer): + fragment = dedent('''\ + opcode/**/aUDO,/**/i[],/**/aik// + aUDO + endop + ''') + tokens = [ + (Keyword.Declaration, u'opcode'), + (Comment.Multiline, u'/**/'), + (Name.Function, u'aUDO'), + (Punctuation, u','), + (Comment.Multiline, u'/**/'), + (Keyword.Type, u'i[]'), + (Punctuation, u','), + (Comment.Multiline, u'/**/'), + (Keyword.Type, u'aik'), + (Comment.Single, u'//'), + (Text, u'\n'), + (Text, u' '), + (Name.Function, u'aUDO'), + (Text, u'\n'), + (Keyword.Declaration, u'endop'), + (Text, u'\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens + + +def test_numbers(lexer): + fragment = '123 0123456789' + tokens = [ + (Number.Integer, u'123'), + (Text, u' '), + (Number.Integer, u'0123456789'), + (Text, u'\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens + fragment = '0xabcdef0123456789 0XABCDEF' + tokens = [ + (Keyword.Type, u'0x'), + (Number.Hex, u'abcdef0123456789'), + (Text, u' '), + (Keyword.Type, u'0X'), + (Number.Hex, u'ABCDEF'), + (Text, u'\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens + fragments = ['1e2', '3e+4', '5e-6', '7E8', '9E+0', '1E-2', '3.', '4.56', '.789'] + for fragment in fragments: tokens = [ - (Comment.Multiline, u'/*\n * comment\n */'), - (Text, u'\n'), - (Comment.Single, u'; comment'), - (Text, u'\n'), - (Comment.Single, u'// comment'), + (Number.Float, fragment), (Text, u'\n') ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - - def testInstrumentBlocks(self): - fragment = dedent('''\ - 
instr/**/1,/**/N_a_M_e_,/**/+Name/**/// - iDuration = p3 - outc:a(aSignal) - endin - ''') + assert list(lexer.get_tokens(fragment)) == tokens + + +def test_quoted_strings(lexer): + fragment = '"characters$MACRO."' + tokens = [ + (String, u'"'), + (String, u'characters'), + (Comment.Preproc, u'$MACRO.'), + (String, u'"'), + (Text, u'\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens + + +def test_braced_strings(lexer): + fragment = dedent('''\ + {{ + characters$MACRO. + }} + ''') + tokens = [ + (String, u'{{'), + (String, u'\ncharacters$MACRO.\n'), + (String, u'}}'), + (Text, u'\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens + + +def test_escape_sequences(lexer): + for character in ['\\', 'a', 'b', 'n', 'r', 't', '"', '012', '345', '67']: + escapedCharacter = '\\' + character + fragment = '"' + escapedCharacter + '"' tokens = [ - (Keyword.Declaration, u'instr'), - (Comment.Multiline, u'/**/'), - (Name.Function, u'1'), - (Punctuation, u','), - (Comment.Multiline, u'/**/'), - (Name.Function, u'N_a_M_e_'), - (Punctuation, u','), - (Comment.Multiline, u'/**/'), - (Punctuation, u'+'), - (Name.Function, u'Name'), - (Comment.Multiline, u'/**/'), - (Comment.Single, u'//'), - (Text, u'\n'), - (Text, u' '), - (Keyword.Type, u'i'), - (Name, u'Duration'), - (Text, u' '), - (Operator, u'='), - (Text, u' '), - (Name.Variable.Instance, u'p3'), - (Text, u'\n'), - (Text, u' '), - (Name.Builtin, u'outc'), - (Punctuation, u':'), - (Keyword.Type, u'a'), - (Punctuation, u'('), - (Keyword.Type, u'a'), - (Name, u'Signal'), - (Punctuation, u')'), - (Text, u'\n'), - (Keyword.Declaration, u'endin'), + (String, u'"'), + (String.Escape, escapedCharacter), + (String, u'"'), (Text, u'\n') ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - - def testUserDefinedOpcodes(self): - fragment = dedent('''\ - opcode/**/aUDO,/**/i[],/**/aik// - aUDO - endop - ''') + assert list(lexer.get_tokens(fragment)) == tokens + fragment = '{{' + escapedCharacter + '}}' tokens = [ - (Keyword.Declaration, u'opcode'), - (Comment.Multiline, u'/**/'), - (Name.Function, u'aUDO'), - (Punctuation, u','), - (Comment.Multiline, u'/**/'), - (Keyword.Type, u'i[]'), - (Punctuation, u','), - (Comment.Multiline, u'/**/'), - (Keyword.Type, u'aik'), - (Comment.Single, u'//'), - (Text, u'\n'), - (Text, u' '), - (Name.Function, u'aUDO'), - (Text, u'\n'), - (Keyword.Declaration, u'endop'), + (String, u'{{'), + (String.Escape, escapedCharacter), + (String, u'}}'), (Text, u'\n') ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + assert list(lexer.get_tokens(fragment)) == tokens - def testNumbers(self): - fragment = '123 0123456789' + +def test_operators(lexer): + fragments = ['+', '-', '~', u'¬', '!', '*', '/', '^', '%', '<<', '>>', '<', '>', + '<=', '>=', '==', '!=', '&', '#', '|', '&&', '||', '?', ':', '+=', + '-=', '*=', '/='] + for fragment in fragments: tokens = [ - (Number.Integer, u'123'), - (Text, u' '), - (Number.Integer, u'0123456789'), + (Operator, fragment), (Text, u'\n') ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - fragment = '0xabcdef0123456789 0XABCDEF' + assert list(lexer.get_tokens(fragment)) == tokens + + +def test_global_value_identifiers(lexer): + for fragment in ['0dbfs', 'A4', 'kr', 'ksmps', 'nchnls', 'nchnls_i', 'sr']: tokens = [ - (Keyword.Type, u'0x'), - (Number.Hex, u'abcdef0123456789'), - (Text, u' '), - (Keyword.Type, u'0X'), - (Number.Hex, u'ABCDEF'), + (Name.Variable.Global, fragment), (Text, u'\n') ] - self.assertEqual(tokens, 
list(self.lexer.get_tokens(fragment))) - fragments = ['1e2', '3e+4', '5e-6', '7E8', '9E+0', '1E-2', '3.', '4.56', '.789'] - for fragment in fragments: - tokens = [ - (Number.Float, fragment), - (Text, u'\n') - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + assert list(lexer.get_tokens(fragment)) == tokens + - def testQuotedStrings(self): - fragment = '"characters$MACRO."' +def test_keywords(lexer): + fragments = ['do', 'else', 'elseif', 'endif', 'enduntil', 'fi', 'if', 'ithen', + 'kthen', 'od', 'then', 'until', 'while'] + for fragment in fragments: tokens = [ - (String, u'"'), - (String, u'characters'), - (Comment.Preproc, u'$MACRO.'), - (String, u'"'), + (Keyword, fragment), (Text, u'\n') ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - - def testBracedStrings(self): - fragment = dedent('''\ - {{ - characters$MACRO. - }} - ''') + assert list(lexer.get_tokens(fragment)) == tokens + for fragment in ['return', 'rireturn']: tokens = [ - (String, u'{{'), - (String, u'\ncharacters$MACRO.\n'), - (String, u'}}'), + (Keyword.Pseudo, fragment), (Text, u'\n') ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + assert list(lexer.get_tokens(fragment)) == tokens + + +def test_labels(lexer): + fragment = dedent('''\ + aLabel: + label2: + ''') + tokens = [ + (Name.Label, u'aLabel'), + (Punctuation, u':'), + (Text, u'\n'), + (Text, u' '), + (Name.Label, u'label2'), + (Punctuation, u':'), + (Text, u'\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens - def testEscapeSequences(self): - for character in ['\\', 'a', 'b', 'n', 'r', 't', '"', '012', '345', '67']: - escapedCharacter = '\\' + character - fragment = '"' + escapedCharacter + '"' + +def test_printks_and_prints_escape_sequences(lexer): + escapedCharacters = ['%!', '%%', '%n', '%N', '%r', '%R', '%t', '%T', '\\\\a', + '\\\\A', '\\\\b', '\\\\B', '\\\\n', '\\\\N', '\\\\r', + '\\\\R', '\\\\t', '\\\\T'] + for opcode in ['printks', 'prints']: + for escapedCharacter in escapedCharacters: + fragment = opcode + ' "' + escapedCharacter + '"' tokens = [ + (Name.Builtin, opcode), + (Text, u' '), (String, u'"'), (String.Escape, escapedCharacter), (String, u'"'), (Text, u'\n') ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - fragment = '{{' + escapedCharacter + '}}' - tokens = [ - (String, u'{{'), - (String.Escape, escapedCharacter), - (String, u'}}'), - (Text, u'\n') - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + assert list(lexer.get_tokens(fragment)) == tokens - def testOperators(self): - fragments = ['+', '-', '~', u'¬', '!', '*', '/', '^', '%', '<<', '>>', '<', '>', - '<=', '>=', '==', '!=', '&', '#', '|', '&&', '||', '?', ':', '+=', - '-=', '*=', '/='] - for fragment in fragments: - tokens = [ - (Operator, fragment), - (Text, u'\n') - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - def testGlobalValueIdentifiers(self): - for fragment in ['0dbfs', 'A4', 'kr', 'ksmps', 'nchnls', 'nchnls_i', 'sr']: - tokens = [ - (Name.Variable.Global, fragment), - (Text, u'\n') - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - - def testKeywords(self): - fragments = ['do', 'else', 'elseif', 'endif', 'enduntil', 'fi', 'if', 'ithen', - 'kthen', 'od', 'then', 'until', 'while'] - for fragment in fragments: - tokens = [ - (Keyword, fragment), - (Text, u'\n') - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - for fragment in ['return', 'rireturn']: - tokens = [ - (Keyword.Pseudo, fragment), - (Text, 
u'\n') - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - - def testLabels(self): - fragment = dedent('''\ - aLabel: - label2: - ''') +def test_goto_statements(lexer): + for keyword in ['goto', 'igoto', 'kgoto']: + fragment = keyword + ' aLabel' tokens = [ - (Name.Label, u'aLabel'), - (Punctuation, u':'), - (Text, u'\n'), + (Keyword, keyword), (Text, u' '), - (Name.Label, u'label2'), - (Punctuation, u':'), + (Name.Label, u'aLabel'), (Text, u'\n') ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - - def testPrintksAndPrintsEscapeSequences(self): - escapedCharacters = ['%!', '%%', '%n', '%N', '%r', '%R', '%t', '%T', '\\\\a', - '\\\\A', '\\\\b', '\\\\B', '\\\\n', '\\\\N', '\\\\r', - '\\\\R', '\\\\t', '\\\\T'] - for opcode in ['printks', 'prints']: - for escapedCharacter in escapedCharacters: - fragment = opcode + ' "' + escapedCharacter + '"' - tokens = [ - (Name.Builtin, opcode), - (Text, u' '), - (String, u'"'), - (String.Escape, escapedCharacter), - (String, u'"'), - (Text, u'\n') - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - - def testGotoStatements(self): - for keyword in ['goto', 'igoto', 'kgoto']: - fragment = keyword + ' aLabel' - tokens = [ - (Keyword, keyword), - (Text, u' '), - (Name.Label, u'aLabel'), - (Text, u'\n') - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - for opcode in ['reinit', 'rigoto', 'tigoto']: - fragment = opcode + ' aLabel' - tokens = [ - (Keyword.Pseudo, opcode), - (Text, u' '), - (Name.Label, u'aLabel'), - (Text, u'\n') - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - for opcode in ['cggoto', 'cigoto', 'cingoto', 'ckgoto', 'cngoto', 'cnkgoto']: - fragment = opcode + ' 1==0, aLabel' - tokens = [ - (Keyword.Pseudo, opcode), - (Text, u' '), - (Number.Integer, u'1'), - (Operator, u'=='), - (Number.Integer, u'0'), - (Punctuation, u','), - (Text, u' '), - (Name.Label, u'aLabel'), - (Text, u'\n') - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - fragment = 'timout 0, 0, aLabel' + assert list(lexer.get_tokens(fragment)) == tokens + for opcode in ['reinit', 'rigoto', 'tigoto']: + fragment = opcode + ' aLabel' tokens = [ - (Keyword.Pseudo, 'timout'), + (Keyword.Pseudo, opcode), (Text, u' '), - (Number.Integer, u'0'), - (Punctuation, u','), + (Name.Label, u'aLabel'), + (Text, u'\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens + for opcode in ['cggoto', 'cigoto', 'cingoto', 'ckgoto', 'cngoto', 'cnkgoto']: + fragment = opcode + ' 1==0, aLabel' + tokens = [ + (Keyword.Pseudo, opcode), (Text, u' '), + (Number.Integer, u'1'), + (Operator, u'=='), (Number.Integer, u'0'), (Punctuation, u','), (Text, u' '), (Name.Label, u'aLabel'), (Text, u'\n') ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - for opcode in ['loop_ge', 'loop_gt', 'loop_le', 'loop_lt']: - fragment = opcode + ' 0, 0, 0, aLabel' - tokens = [ - (Keyword.Pseudo, opcode), - (Text, u' '), - (Number.Integer, u'0'), - (Punctuation, u','), - (Text, u' '), - (Number.Integer, u'0'), - (Punctuation, u','), - (Text, u' '), - (Number.Integer, u'0'), - (Punctuation, u','), - (Text, u' '), - (Name.Label, u'aLabel'), - (Text, u'\n') - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - - def testIncludeDirectives(self): - for character in ['"', '|']: - fragment = '#include/**/' + character + 'file.udo' + character - tokens = [ - (Comment.Preproc, u'#include'), - (Comment.Multiline, u'/**/'), - (String, character + u'file.udo' + character), - (Text, 
u'\n') - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - - def testObjectLikeMacroDefinitions(self): - fragment = dedent('''\ - # \tdefine MACRO#macro_body# - #define/**/ - MACRO/**/ - #\\#macro - body\\## - ''') + assert list(lexer.get_tokens(fragment)) == tokens + fragment = 'timout 0, 0, aLabel' + tokens = [ + (Keyword.Pseudo, 'timout'), + (Text, u' '), + (Number.Integer, u'0'), + (Punctuation, u','), + (Text, u' '), + (Number.Integer, u'0'), + (Punctuation, u','), + (Text, u' '), + (Name.Label, u'aLabel'), + (Text, u'\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens + for opcode in ['loop_ge', 'loop_gt', 'loop_le', 'loop_lt']: + fragment = opcode + ' 0, 0, 0, aLabel' tokens = [ - (Comment.Preproc, u'# \tdefine'), + (Keyword.Pseudo, opcode), (Text, u' '), - (Comment.Preproc, u'MACRO'), - (Punctuation, u'#'), - (Comment.Preproc, u'macro_body'), - (Punctuation, u'#'), - (Text, u'\n'), - (Comment.Preproc, u'#define'), - (Comment.Multiline, u'/**/'), - (Text, u'\n'), - (Comment.Preproc, u'MACRO'), - (Comment.Multiline, u'/**/'), - (Text, u'\n'), - (Punctuation, u'#'), - (Comment.Preproc, u'\\#'), - (Comment.Preproc, u'macro\nbody'), - (Comment.Preproc, u'\\#'), - (Punctuation, u'#'), - (Text, u'\n') - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - - def testFunctionLikeMacroDefinitions(self): - fragment = dedent('''\ - #define MACRO(ARG1#ARG2) #macro_body# - #define/**/ - MACRO(ARG1'ARG2' ARG3)/**/ - #\\#macro - body\\## - ''') - tokens = [ - (Comment.Preproc, u'#define'), + (Number.Integer, u'0'), + (Punctuation, u','), (Text, u' '), - (Comment.Preproc, u'MACRO'), - (Punctuation, u'('), - (Comment.Preproc, u'ARG1'), - (Punctuation, u'#'), - (Comment.Preproc, u'ARG2'), - (Punctuation, u')'), + (Number.Integer, u'0'), + (Punctuation, u','), (Text, u' '), - (Punctuation, u'#'), - (Comment.Preproc, u'macro_body'), - (Punctuation, u'#'), - (Text, u'\n'), - (Comment.Preproc, u'#define'), - (Comment.Multiline, u'/**/'), - (Text, u'\n'), - (Comment.Preproc, u'MACRO'), - (Punctuation, u'('), - (Comment.Preproc, u'ARG1'), - (Punctuation, u"'"), - (Comment.Preproc, u'ARG2'), - (Punctuation, u"'"), + (Number.Integer, u'0'), + (Punctuation, u','), (Text, u' '), - (Comment.Preproc, u'ARG3'), - (Punctuation, u')'), - (Comment.Multiline, u'/**/'), - (Text, u'\n'), - (Punctuation, u'#'), - (Comment.Preproc, u'\\#'), - (Comment.Preproc, u'macro\nbody'), - (Comment.Preproc, u'\\#'), - (Punctuation, u'#'), + (Name.Label, u'aLabel'), (Text, u'\n') ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + assert list(lexer.get_tokens(fragment)) == tokens - def testMacroPreprocessorDirectives(self): - for directive in ['#ifdef', '#ifndef', '#undef']: - fragment = directive + ' MACRO' - tokens = [ - (Comment.Preproc, directive), - (Text, u' '), - (Comment.Preproc, u'MACRO'), - (Text, u'\n') - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - - def testOtherPreprocessorDirectives(self): - fragment = dedent('''\ - #else - #end - #endif - ### - @ \t12345 - @@ \t67890 - ''') + +def test_include_directives(lexer): + for character in ['"', '|']: + fragment = '#include/**/' + character + 'file.udo' + character tokens = [ - (Comment.Preproc, u'#else'), - (Text, u'\n'), - (Comment.Preproc, u'#end'), - (Text, u'\n'), - (Comment.Preproc, u'#endif'), - (Text, u'\n'), - (Comment.Preproc, u'###'), - (Text, u'\n'), - (Comment.Preproc, u'@ \t12345'), - (Text, u'\n'), - (Comment.Preproc, u'@@ \t67890'), + (Comment.Preproc, u'#include'), + 
(Comment.Multiline, u'/**/'), + (String, character + u'file.udo' + character), (Text, u'\n') ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + assert list(lexer.get_tokens(fragment)) == tokens + + +def test_includestr_directives(lexer): + fragment = '#includestr/**/"$MACRO..udo"' + tokens = [ + (Comment.Preproc, u'#includestr'), + (Comment.Multiline, u'/**/'), + (String, u'"'), + (Comment.Preproc, u'$MACRO.'), + (String, u'.udo'), + (String, u'"'), + (Text, u'\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens + - def testFunctionLikeMacros(self): - fragment = "$MACRO.(((x#y\\)))' \"(#'x)\\)x\\))\"# {{x\\))x)\\)(#'}});" +def test_object_like_macro_definitions(lexer): + fragment = dedent('''\ + # \tdefine MACRO#macro_body# + #define/**/ + MACRO/**/ + #\\#macro + body\\## + ''') + tokens = [ + (Comment.Preproc, u'# \tdefine'), + (Text, u' '), + (Comment.Preproc, u'MACRO'), + (Punctuation, u'#'), + (Comment.Preproc, u'macro_body'), + (Punctuation, u'#'), + (Text, u'\n'), + (Comment.Preproc, u'#define'), + (Comment.Multiline, u'/**/'), + (Text, u'\n'), + (Comment.Preproc, u'MACRO'), + (Comment.Multiline, u'/**/'), + (Text, u'\n'), + (Punctuation, u'#'), + (Comment.Preproc, u'\\#'), + (Comment.Preproc, u'macro\nbody'), + (Comment.Preproc, u'\\#'), + (Punctuation, u'#'), + (Text, u'\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens + + +def test_function_like_macro_definitions(lexer): + fragment = dedent('''\ + #define MACRO(ARG1#ARG2) #macro_body# + #define/**/ + MACRO(ARG1'ARG2' ARG3)/**/ + #\\#macro + body\\## + ''') + tokens = [ + (Comment.Preproc, u'#define'), + (Text, u' '), + (Comment.Preproc, u'MACRO'), + (Punctuation, u'('), + (Comment.Preproc, u'ARG1'), + (Punctuation, u'#'), + (Comment.Preproc, u'ARG2'), + (Punctuation, u')'), + (Text, u' '), + (Punctuation, u'#'), + (Comment.Preproc, u'macro_body'), + (Punctuation, u'#'), + (Text, u'\n'), + (Comment.Preproc, u'#define'), + (Comment.Multiline, u'/**/'), + (Text, u'\n'), + (Comment.Preproc, u'MACRO'), + (Punctuation, u'('), + (Comment.Preproc, u'ARG1'), + (Punctuation, u"'"), + (Comment.Preproc, u'ARG2'), + (Punctuation, u"'"), + (Text, u' '), + (Comment.Preproc, u'ARG3'), + (Punctuation, u')'), + (Comment.Multiline, u'/**/'), + (Text, u'\n'), + (Punctuation, u'#'), + (Comment.Preproc, u'\\#'), + (Comment.Preproc, u'macro\nbody'), + (Comment.Preproc, u'\\#'), + (Punctuation, u'#'), + (Text, u'\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens + + +def test_macro_preprocessor_directives(lexer): + for directive in ['#ifdef', '#ifndef', '#undef']: + fragment = directive + ' MACRO' tokens = [ - (Comment.Preproc, u'$MACRO.'), - (Punctuation, u'('), - (Comment.Preproc, u'('), - (Comment.Preproc, u'('), - (Comment.Preproc, u'x#y\\)'), - (Comment.Preproc, u')'), - (Comment.Preproc, u')'), - (Punctuation, u"'"), - (Comment.Preproc, u' '), - (String, u'"'), - (Error, u'('), - (Error, u'#'), - (Error, u"'"), - (String, u'x'), - (Error, u')'), - (Comment.Preproc, u'\\)'), - (String, u'x'), - (Comment.Preproc, u'\\)'), - (Error, u')'), - (String, u'"'), - (Punctuation, u'#'), - (Comment.Preproc, u' '), - (String, u'{{'), - (String, u'x'), - (Comment.Preproc, u'\\)'), - (Error, u')'), - (String, u'x'), - (Error, u')'), - (Comment.Preproc, u'\\)'), - (Error, u'('), - (Error, u'#'), - (Error, u"'"), - (String, u'}}'), - (Punctuation, u')'), - (Comment.Single, u';'), + (Comment.Preproc, directive), + (Text, u' '), + (Comment.Preproc, u'MACRO'), (Text, u'\n') ] - self.assertEqual(tokens, 
list(self.lexer.get_tokens(fragment))) + assert list(lexer.get_tokens(fragment)) == tokens - def testName(self): - fragment = 'kG:V' - tokens = [ - (Keyword.Type, 'k'), - (Name, 'G'), - (Punctuation, ':'), - (Name, 'V'), - (Text, '\n') - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + +def test_other_preprocessor_directives(lexer): + fragment = dedent('''\ + #else + #end + #endif + ### + @ \t12345 + @@ \t67890 + ''') + tokens = [ + (Comment.Preproc, u'#else'), + (Text, u'\n'), + (Comment.Preproc, u'#end'), + (Text, u'\n'), + (Comment.Preproc, u'#endif'), + (Text, u'\n'), + (Comment.Preproc, u'###'), + (Text, u'\n'), + (Comment.Preproc, u'@ \t12345'), + (Text, u'\n'), + (Comment.Preproc, u'@@ \t67890'), + (Text, u'\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens + + +def test_function_like_macros(lexer): + fragment = "$MACRO.(((x#y\\)))' \"(#'x)\\)x\\))\"# {{x\\))x)\\)(#'}});" + tokens = [ + (Comment.Preproc, u'$MACRO.'), + (Punctuation, u'('), + (Comment.Preproc, u'('), + (Comment.Preproc, u'('), + (Comment.Preproc, u'x#y\\)'), + (Comment.Preproc, u')'), + (Comment.Preproc, u')'), + (Punctuation, u"'"), + (Comment.Preproc, u' '), + (String, u'"'), + (Error, u'('), + (Error, u'#'), + (Error, u"'"), + (String, u'x'), + (Error, u')'), + (Comment.Preproc, u'\\)'), + (String, u'x'), + (Comment.Preproc, u'\\)'), + (Error, u')'), + (String, u'"'), + (Punctuation, u'#'), + (Comment.Preproc, u' '), + (String, u'{{'), + (String, u'x'), + (Comment.Preproc, u'\\)'), + (Error, u')'), + (String, u'x'), + (Error, u')'), + (Comment.Preproc, u'\\)'), + (Error, u'('), + (Error, u'#'), + (Error, u"'"), + (String, u'}}'), + (Punctuation, u')'), + (Comment.Single, u';'), + (Text, u'\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens + + +def test_name(lexer): + fragment = 'kG:V' + tokens = [ + (Keyword.Type, 'k'), + (Name, 'G'), + (Punctuation, ':'), + (Name, 'V'), + (Text, '\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens diff --git a/tests/test_data.py b/tests/test_data.py index be37141..23f1d4a 100644 --- a/tests/test_data.py +++ b/tests/test_data.py @@ -7,94 +7,118 @@ :license: BSD, see LICENSE for details. """ -import unittest +import pytest -from pygments.lexers import JsonLexer, JsonBareObjectLexer +from pygments.lexers import JsonLexer, JsonBareObjectLexer, YamlLexer from pygments.token import Token -class JsonTest(unittest.TestCase): - def setUp(self): - self.lexer = JsonLexer() - - def testBasic(self): - fragment = u'{"foo": "bar", "foo2": [1, 2, 3]}\n' - tokens = [ - (Token.Punctuation, u'{'), - (Token.Name.Tag, u'"foo"'), - (Token.Punctuation, u':'), - (Token.Text, u' '), - (Token.Literal.String.Double, u'"bar"'), - (Token.Punctuation, u','), - (Token.Text, u' '), - (Token.Name.Tag, u'"foo2"'), - (Token.Punctuation, u':'), - (Token.Text, u' '), - (Token.Punctuation, u'['), - (Token.Literal.Number.Integer, u'1'), - (Token.Punctuation, u','), - (Token.Text, u' '), - (Token.Literal.Number.Integer, u'2'), - (Token.Punctuation, u','), - (Token.Text, u' '), - (Token.Literal.Number.Integer, u'3'), - (Token.Punctuation, u']'), - (Token.Punctuation, u'}'), - (Token.Text, u'\n'), - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - -class JsonBareObjectTest(unittest.TestCase): - def setUp(self): - self.lexer = JsonBareObjectLexer() - - def testBasic(self): - # This is the same as testBasic for JsonLexer above, except the - # enclosing curly braces are removed. 
- fragment = u'"foo": "bar", "foo2": [1, 2, 3]\n' - tokens = [ - (Token.Name.Tag, u'"foo"'), - (Token.Punctuation, u':'), - (Token.Text, u' '), - (Token.Literal.String.Double, u'"bar"'), - (Token.Punctuation, u','), - (Token.Text, u' '), - (Token.Name.Tag, u'"foo2"'), - (Token.Punctuation, u':'), - (Token.Text, u' '), - (Token.Punctuation, u'['), - (Token.Literal.Number.Integer, u'1'), - (Token.Punctuation, u','), - (Token.Text, u' '), - (Token.Literal.Number.Integer, u'2'), - (Token.Punctuation, u','), - (Token.Text, u' '), - (Token.Literal.Number.Integer, u'3'), - (Token.Punctuation, u']'), - (Token.Text, u'\n'), - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - - def testClosingCurly(self): - # This can be an Error token, but should not be a can't-pop-from-stack - # exception. - fragment = '}"a"\n' - tokens = [ - (Token.Error, '}'), - (Token.Name.Tag, '"a"'), - (Token.Text, '\n'), - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - - def testClosingCurlyInValue(self): - fragment = '"": ""}\n' - tokens = [ - (Token.Name.Tag, '""'), - (Token.Punctuation, ':'), - (Token.Text, ' '), - (Token.Literal.String.Double, '""'), - (Token.Error, '}'), - (Token.Text, '\n'), - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) +@pytest.fixture(scope='module') +def lexer_json(): + yield JsonLexer() + +@pytest.fixture(scope='module') +def lexer_bare(): + yield JsonBareObjectLexer() + + +@pytest.fixture(scope='module') +def lexer_yaml(): + yield YamlLexer() + + +def test_basic_json(lexer_json): + fragment = u'{"foo": "bar", "foo2": [1, 2, 3]}\n' + tokens = [ + (Token.Punctuation, u'{'), + (Token.Name.Tag, u'"foo"'), + (Token.Punctuation, u':'), + (Token.Text, u' '), + (Token.Literal.String.Double, u'"bar"'), + (Token.Punctuation, u','), + (Token.Text, u' '), + (Token.Name.Tag, u'"foo2"'), + (Token.Punctuation, u':'), + (Token.Text, u' '), + (Token.Punctuation, u'['), + (Token.Literal.Number.Integer, u'1'), + (Token.Punctuation, u','), + (Token.Text, u' '), + (Token.Literal.Number.Integer, u'2'), + (Token.Punctuation, u','), + (Token.Text, u' '), + (Token.Literal.Number.Integer, u'3'), + (Token.Punctuation, u']'), + (Token.Punctuation, u'}'), + (Token.Text, u'\n'), + ] + assert list(lexer_json.get_tokens(fragment)) == tokens + + +def test_basic_bare(lexer_bare): + # This is the same as testBasic for JsonLexer above, except the + # enclosing curly braces are removed. + fragment = u'"foo": "bar", "foo2": [1, 2, 3]\n' + tokens = [ + (Token.Name.Tag, u'"foo"'), + (Token.Punctuation, u':'), + (Token.Text, u' '), + (Token.Literal.String.Double, u'"bar"'), + (Token.Punctuation, u','), + (Token.Text, u' '), + (Token.Name.Tag, u'"foo2"'), + (Token.Punctuation, u':'), + (Token.Text, u' '), + (Token.Punctuation, u'['), + (Token.Literal.Number.Integer, u'1'), + (Token.Punctuation, u','), + (Token.Text, u' '), + (Token.Literal.Number.Integer, u'2'), + (Token.Punctuation, u','), + (Token.Text, u' '), + (Token.Literal.Number.Integer, u'3'), + (Token.Punctuation, u']'), + (Token.Text, u'\n'), + ] + assert list(lexer_bare.get_tokens(fragment)) == tokens + + +def test_closing_curly(lexer_bare): + # This can be an Error token, but should not be a can't-pop-from-stack + # exception. 
+ fragment = '}"a"\n' + tokens = [ + (Token.Error, '}'), + (Token.Name.Tag, '"a"'), + (Token.Text, '\n'), + ] + assert list(lexer_bare.get_tokens(fragment)) == tokens + + +def test_closing_curly_in_value(lexer_bare): + fragment = '"": ""}\n' + tokens = [ + (Token.Name.Tag, '""'), + (Token.Punctuation, ':'), + (Token.Text, ' '), + (Token.Literal.String.Double, '""'), + (Token.Error, '}'), + (Token.Text, '\n'), + ] + assert list(lexer_bare.get_tokens(fragment)) == tokens + + +def test_yaml(lexer_yaml): + # Bug #1528: This previously parsed 'token # innocent' as a tag + fragment = u'here: token # innocent: comment\n' + tokens = [ + (Token.Name.Tag, u'here'), + (Token.Punctuation, u':'), + (Token.Text, u' '), + (Token.Literal.Scalar.Plain, u'token'), + (Token.Text, u' '), + (Token.Comment.Single, u'# innocent: comment'), + (Token.Text, u'\n'), + ] + assert list(lexer_yaml.get_tokens(fragment)) == tokens diff --git a/tests/test_examplefiles.py b/tests/test_examplefiles.py index e208403..491c1e0 100644 --- a/tests/test_examplefiles.py +++ b/tests/test_examplefiles.py @@ -14,12 +14,15 @@ import pprint import difflib import pickle +import pytest + from pygments.lexers import get_lexer_for_filename, get_lexer_by_name from pygments.token import Error from pygments.util import ClassNotFound -import support - +# You can set this to True to store the exact token type output of example +# files in tests/examplefiles/output, and on the next run the test will +# want them to stay the same. In the repository, this should stay False. STORE_OUTPUT = False STATS = {} @@ -32,9 +35,11 @@ TESTDIR = os.path.dirname(__file__) BAD_FILES_FOR_JYTHON = ('Object.st', 'all.nit', 'genclass.clj', 'ragel-cpp_rlscan') -def test_example_files(): - global STATS - STATS = {} + +def get_example_files(): + # TODO: move stats to a fixture + # global STATS + # STATS = {} outdir = os.path.join(TESTDIR, 'examplefiles', 'output') if STORE_OUTPUT and not os.path.isdir(outdir): os.makedirs(outdir) @@ -51,48 +56,49 @@ def test_example_files(): continue print(absfn) - with open(absfn, 'rb') as f: - code = f.read() + yield fn + + # N = 7 + # stats = list(STATS.items()) + # stats.sort(key=lambda x: x[1][1]) + # print('\nExample files that took longest absolute time:') + # for fn, t in stats[-N:]: + # print('%-30s %6d chars %8.2f ms %7.3f ms/char' % ((fn,) + t)) + # print() + # stats.sort(key=lambda x: x[1][2]) + # print('\nExample files that took longest relative time:') + # for fn, t in stats[-N:]: + # print('%-30s %6d chars %8.2f ms %7.3f ms/char' % ((fn,) + t)) + + +@pytest.mark.parametrize('filename', get_example_files()) +def test_examplefile(filename): + if os.name == 'java' and filename in BAD_FILES_FOR_JYTHON: + pytest.skip('%s is a known bad file on Jython' % filename) + + absfn = os.path.join(TESTDIR, 'examplefiles', filename) + with open(absfn, 'rb') as f: + text = f.read() + try: + utext = text.decode('utf-8') + except UnicodeError: + utext = text.decode('latin1') + + lx = None + if '_' in filename: try: - code = code.decode('utf-8') - except UnicodeError: - code = code.decode('latin1') - - lx = None - if '_' in fn: - try: - lx = get_lexer_by_name(fn.split('_')[0]) - except ClassNotFound: - pass - if lx is None: - try: - lx = get_lexer_for_filename(absfn, code=code) - except ClassNotFound: - raise AssertionError('file %r has no registered extension, ' - 'nor is of the form _filename ' - 'for overriding, thus no lexer found.' 
- % fn) - yield check_lexer, lx, fn - - N = 7 - stats = list(STATS.items()) - stats.sort(key=lambda x: x[1][1]) - print('\nExample files that took longest absolute time:') - for fn, t in stats[-N:]: - print('%-30s %6d chars %8.2f ms %7.3f ms/char' % ((fn,) + t)) - print() - stats.sort(key=lambda x: x[1][2]) - print('\nExample files that took longest relative time:') - for fn, t in stats[-N:]: - print('%-30s %6d chars %8.2f ms %7.3f ms/char' % ((fn,) + t)) - - -def check_lexer(lx, fn): - if os.name == 'java' and fn in BAD_FILES_FOR_JYTHON: - raise support.SkipTest('%s is a known bad file on Jython' % fn) - absfn = os.path.join(TESTDIR, 'examplefiles', fn) - with open(absfn, 'rb') as fp: - text = fp.read() + lx = get_lexer_by_name(filename.split('_')[0]) + except ClassNotFound: + pass + if lx is None: + try: + lx = get_lexer_for_filename(absfn, code=utext) + except ClassNotFound: + raise AssertionError('file %r has no registered extension, ' + 'nor is of the form _filename ' + 'for overriding, thus no lexer found.' + % filename) + text = text.replace(b'\r\n', b'\n') text = text.strip(b'\n') + b'\n' try: @@ -122,7 +128,7 @@ def check_lexer(lx, fn): # check output against previous run if enabled if STORE_OUTPUT: # no previous output -- store it - outfn = os.path.join(TESTDIR, 'examplefiles', 'output', fn) + outfn = os.path.join(TESTDIR, 'examplefiles', 'output', filename) if not os.path.isfile(outfn): with open(outfn, 'wb') as fp: pickle.dump(tokens, fp) diff --git a/tests/test_ezhil.py b/tests/test_ezhil.py index 15cc13b..8047a30 100644 --- a/tests/test_ezhil.py +++ b/tests/test_ezhil.py @@ -7,177 +7,171 @@ :license: BSD, see LICENSE for details. """ -import unittest +import pytest from pygments.token import Operator, Number, Text, Token from pygments.lexers import EzhilLexer -class EzhilTest(unittest.TestCase): +@pytest.fixture(scope='module') +def lexer(): + yield EzhilLexer() - def setUp(self): - self.lexer = EzhilLexer() - self.maxDiff = None - - def testSum(self): - fragment = u'1+3\n' - tokens = [ - (Number.Integer, u'1'), - (Operator, u'+'), - (Number.Integer, u'3'), - (Text, u'\n'), - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - - def testGCDExpr(self): - fragment = u'1^3+(5-5)*gcd(a,b)\n' - tokens = [ - (Token.Number.Integer,u'1'), - (Token.Operator,u'^'), - (Token.Literal.Number.Integer, u'3'), - (Token.Operator, u'+'), - (Token.Punctuation, u'('), - (Token.Literal.Number.Integer, u'5'), - (Token.Operator, u'-'), - (Token.Literal.Number.Integer, u'5'), - (Token.Punctuation, u')'), - (Token.Operator, u'*'), - (Token.Name, u'gcd'), - (Token.Punctuation, u'('), - (Token.Name, u'a'), - (Token.Operator, u','), - (Token.Name, u'b'), - (Token.Punctuation, u')'), - (Token.Text, u'\n') - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - def testIfStatement(self): - fragment = u"""@( 0 > 3 ) ஆனால் - பதிப்பி "wont print" +def test_sum(lexer): + fragment = u'1+3\n' + tokens = [ + (Number.Integer, u'1'), + (Operator, u'+'), + (Number.Integer, u'3'), + (Text, u'\n'), + ] + assert list(lexer.get_tokens(fragment)) == tokens + + +def test_gcd_expr(lexer): + fragment = u'1^3+(5-5)*gcd(a,b)\n' + tokens = [ + (Token.Number.Integer, u'1'), + (Token.Operator, u'^'), + (Token.Literal.Number.Integer, u'3'), + (Token.Operator, u'+'), + (Token.Punctuation, u'('), + (Token.Literal.Number.Integer, u'5'), + (Token.Operator, u'-'), + (Token.Literal.Number.Integer, u'5'), + (Token.Punctuation, u')'), + (Token.Operator, u'*'), + (Token.Name, u'gcd'), + 
(Token.Punctuation, u'('), + (Token.Name, u'a'), + (Token.Operator, u','), + (Token.Name, u'b'), + (Token.Punctuation, u')'), + (Token.Text, u'\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens + + +def test_if_statement(lexer): + fragment = u"""@( 0 > 3 ) ஆனால் + பதிப்பி "wont print" முடி""" - tokens = [ - (Token.Operator, u'@'), - (Token.Punctuation, u'('), - (Token.Text, u' '), - (Token.Literal.Number.Integer,u'0'), - (Token.Text, u' '), - (Token.Operator,u'>'), - (Token.Text, u' '), - (Token.Literal.Number.Integer, u'3'), - (Token.Text, u' '), - (Token.Punctuation, u')'), - (Token.Text, u' '), - (Token.Keyword, u'ஆனால்'), - (Token.Text, u'\n'), - (Token.Text, u'\t'), - (Token.Keyword, u'பதிப்பி'), - (Token.Text, u' '), - (Token.Literal.String, u'"wont print"'), - (Token.Text, u'\t'), - (Token.Text, u'\n'), - (Token.Keyword, u'முடி'), - (Token.Text, u'\n') - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + tokens = [ + (Token.Operator, u'@'), + (Token.Punctuation, u'('), + (Token.Text, u' '), + (Token.Literal.Number.Integer, u'0'), + (Token.Text, u' '), + (Token.Operator, u'>'), + (Token.Text, u' '), + (Token.Literal.Number.Integer, u'3'), + (Token.Text, u' '), + (Token.Punctuation, u')'), + (Token.Text, u' '), + (Token.Keyword, u'ஆனால்'), + (Token.Text, u'\n'), + (Token.Text, u' '), + (Token.Keyword, u'பதிப்பி'), + (Token.Text, u' '), + (Token.Literal.String, u'"wont print"'), + (Token.Text, u'\n'), + (Token.Keyword, u'முடி'), + (Token.Text, u'\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens + - def testFunction(self): - fragment = u"""# (C) முத்தையா அண்ணாமலை 2013, 2015 +def test_function(lexer): + fragment = u"""# (C) முத்தையா அண்ணாமலை 2013, 2015 நிரல்பாகம் gcd ( x, y ) - மு = max(x,y) - q = min(x,y) +மு = max(x,y) + q = min(x,y) - @( q == 0 ) ஆனால் - பின்கொடு மு - முடி - பின்கொடு gcd( மு - q , q ) +@( q == 0 ) ஆனால் + பின்கொடு மு +முடி +பின்கொடு gcd( மு - q , q ) முடி\n""" - tokens = [ - (Token.Comment.Single, - u'# (C) \u0bae\u0bc1\u0ba4\u0bcd\u0ba4\u0bc8\u0baf\u0bbe \u0b85' - u'\u0ba3\u0bcd\u0ba3\u0bbe\u0bae\u0bb2\u0bc8 2013, 2015\n'), - (Token.Keyword,u'நிரல்பாகம்'), - (Token.Text, u' '), - (Token.Name, u'gcd'), - (Token.Text, u' '), - (Token.Punctuation, u'('), - (Token.Text, u' '), - (Token.Name, u'x'), - (Token.Operator, u','), - (Token.Text, u' '), - (Token.Name, u'y'), - (Token.Text, u' '), - (Token.Punctuation, u')'), - (Token.Text, u'\n'), - (Token.Text, u' '), - (Token.Name, u'\u0bae\u0bc1'), - (Token.Text, u' '), - (Token.Operator, u'='), - (Token.Text, u' '), - (Token.Name.Builtin, u'max'), - (Token.Punctuation, u'('), - (Token.Name, u'x'), - (Token.Operator, u','), - (Token.Name, u'y'), - (Token.Punctuation, u')'), - (Token.Text, u'\n'), - (Token.Text, u' '), - (Token.Name, u'q'), - (Token.Text, u' '), - (Token.Operator, u'='), - (Token.Text, u' '), - (Token.Name.Builtin, u'min'), - (Token.Punctuation, u'('), - (Token.Name, u'x'), - (Token.Operator, u','), - (Token.Name, u'y'), - (Token.Punctuation, u')'), - (Token.Text, u'\n'), - (Token.Text, u'\n'), - (Token.Text, u' '), - (Token.Operator, u'@'), - (Token.Punctuation, u'('), - (Token.Text, u' '), - (Token.Name, u'q'), - (Token.Text, u' '), - (Token.Operator, u'=='), - (Token.Text, u' '), - (Token.Literal.Number.Integer, u'0'), - (Token.Text, u' '), - (Token.Punctuation, u')'), - (Token.Text, u' '), - (Token.Keyword, u'ஆனால்'), - (Token.Text, u'\n'), - (Token.Text, u' '), - (Token.Keyword, u'பின்கொடு'), - (Token.Text, u' '), - (Token.Name, u'\u0bae\u0bc1'), - (Token.Text, 
u'\n'), - (Token.Text, u' '), - (Token.Keyword, u'முடி'), - (Token.Text, u'\n'), - (Token.Text, u' '), - (Token.Keyword, u'\u0baa\u0bbf\u0ba9\u0bcd\u0b95\u0bca\u0b9f\u0bc1'), - (Token.Text, u' '), - (Token.Name, u'gcd'), - (Token.Punctuation, u'('), - (Token.Text, u' '), - (Token.Name, u'\u0bae\u0bc1'), - (Token.Text, u' '), - (Token.Operator, u'-'), - (Token.Text, u' '), - (Token.Name, u'q'), - (Token.Text, u' '), - (Token.Operator, u','), - (Token.Text, u' '), - (Token.Name, u'q'), - (Token.Text, u' '), - (Token.Punctuation, u')'), - (Token.Text, u'\n'), - (Token.Keyword, u'முடி'), #u'\u0bae\u0bc1\u0b9f\u0bbf'), - (Token.Text, u'\n') - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - -if __name__ == "__main__": - unittest.main() + tokens = [ + (Token.Comment.Single, + u'# (C) \u0bae\u0bc1\u0ba4\u0bcd\u0ba4\u0bc8\u0baf\u0bbe \u0b85' + u'\u0ba3\u0bcd\u0ba3\u0bbe\u0bae\u0bb2\u0bc8 2013, 2015\n'), + (Token.Keyword, u'நிரல்பாகம்'), + (Token.Text, u' '), + (Token.Name, u'gcd'), + (Token.Text, u' '), + (Token.Punctuation, u'('), + (Token.Text, u' '), + (Token.Name, u'x'), + (Token.Operator, u','), + (Token.Text, u' '), + (Token.Name, u'y'), + (Token.Text, u' '), + (Token.Punctuation, u')'), + (Token.Text, u'\n'), + (Token.Name, u'\u0bae\u0bc1'), + (Token.Text, u' '), + (Token.Operator, u'='), + (Token.Text, u' '), + (Token.Name.Builtin, u'max'), + (Token.Punctuation, u'('), + (Token.Name, u'x'), + (Token.Operator, u','), + (Token.Name, u'y'), + (Token.Punctuation, u')'), + (Token.Text, u'\n'), + (Token.Text, u' '), + (Token.Name, u'q'), + (Token.Text, u' '), + (Token.Operator, u'='), + (Token.Text, u' '), + (Token.Name.Builtin, u'min'), + (Token.Punctuation, u'('), + (Token.Name, u'x'), + (Token.Operator, u','), + (Token.Name, u'y'), + (Token.Punctuation, u')'), + (Token.Text, u'\n'), + (Token.Text, u'\n'), + (Token.Operator, u'@'), + (Token.Punctuation, u'('), + (Token.Text, u' '), + (Token.Name, u'q'), + (Token.Text, u' '), + (Token.Operator, u'=='), + (Token.Text, u' '), + (Token.Literal.Number.Integer, u'0'), + (Token.Text, u' '), + (Token.Punctuation, u')'), + (Token.Text, u' '), + (Token.Keyword, u'ஆனால்'), + (Token.Text, u'\n'), + (Token.Text, u' '), + (Token.Keyword, u'பின்கொடு'), + (Token.Text, u' '), + (Token.Name, u'\u0bae\u0bc1'), + (Token.Text, u'\n'), + (Token.Keyword, u'முடி'), + (Token.Text, u'\n'), + (Token.Keyword, u'\u0baa\u0bbf\u0ba9\u0bcd\u0b95\u0bca\u0b9f\u0bc1'), + (Token.Text, u' '), + (Token.Name, u'gcd'), + (Token.Punctuation, u'('), + (Token.Text, u' '), + (Token.Name, u'\u0bae\u0bc1'), + (Token.Text, u' '), + (Token.Operator, u'-'), + (Token.Text, u' '), + (Token.Name, u'q'), + (Token.Text, u' '), + (Token.Operator, u','), + (Token.Text, u' '), + (Token.Name, u'q'), + (Token.Text, u' '), + (Token.Punctuation, u')'), + (Token.Text, u'\n'), + (Token.Keyword, u'முடி'), # u'\u0bae\u0bc1\u0b9f\u0bbf'), + (Token.Text, u'\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens diff --git a/tests/test_guessing.py b/tests/test_guessing.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/test_html_formatter.py b/tests/test_html_formatter.py index 37efd6f..9524739 100644 --- a/tests/test_html_formatter.py +++ b/tests/test_html_formatter.py @@ -12,189 +12,199 @@ from __future__ import print_function import io import os import re -import unittest import tempfile -from os.path import join, dirname, isfile +from os import path + +from pytest import raises from pygments.util import StringIO from pygments.lexers import PythonLexer from 
pygments.formatters import HtmlFormatter, NullFormatter from pygments.formatters.html import escape_html -import support - -TESTFILE, TESTDIR = support.location(__file__) +TESTDIR = path.dirname(path.abspath(__file__)) +TESTFILE = path.join(TESTDIR, 'test_html_formatter.py') with io.open(TESTFILE, encoding='utf-8') as fp: tokensource = list(PythonLexer().get_tokens(fp.read())) -class HtmlFormatterTest(unittest.TestCase): - def test_correct_output(self): - hfmt = HtmlFormatter(nowrap=True) - houtfile = StringIO() - hfmt.format(tokensource, houtfile) - - nfmt = NullFormatter() - noutfile = StringIO() - nfmt.format(tokensource, noutfile) - - stripped_html = re.sub('<.*?>', '', houtfile.getvalue()) - escaped_text = escape_html(noutfile.getvalue()) - self.assertEqual(stripped_html, escaped_text) - - def test_external_css(self): - # test correct behavior - # CSS should be in /tmp directory - fmt1 = HtmlFormatter(full=True, cssfile='fmt1.css', outencoding='utf-8') - # CSS should be in TESTDIR (TESTDIR is absolute) - fmt2 = HtmlFormatter(full=True, cssfile=join(TESTDIR, 'fmt2.css'), - outencoding='utf-8') - tfile = tempfile.NamedTemporaryFile(suffix='.html') - fmt1.format(tokensource, tfile) - try: - fmt2.format(tokensource, tfile) - self.assertTrue(isfile(join(TESTDIR, 'fmt2.css'))) - except IOError: - # test directory not writable - pass - tfile.close() - - self.assertTrue(isfile(join(dirname(tfile.name), 'fmt1.css'))) - os.unlink(join(dirname(tfile.name), 'fmt1.css')) - try: - os.unlink(join(TESTDIR, 'fmt2.css')) - except OSError: - pass - - def test_all_options(self): - def check(optdict): - outfile = StringIO() - fmt = HtmlFormatter(**optdict) - fmt.format(tokensource, outfile) - - for optdict in [ - dict(nowrap=True), - dict(linenos=True, full=True), - dict(linenos=True, linespans='L'), - dict(hl_lines=[1, 5, 10, 'xxx']), - dict(hl_lines=[1, 5, 10], noclasses=True), - ]: - check(optdict) - - for linenos in [False, 'table', 'inline']: - for noclasses in [False, True]: - for linenospecial in [0, 5]: - for anchorlinenos in [False, True]: - optdict = dict( - linenos=linenos, - noclasses=noclasses, - linenospecial=linenospecial, - anchorlinenos=anchorlinenos, - ) - check(optdict) - - def test_linenos(self): - optdict = dict(linenos=True) +def test_correct_output(): + hfmt = HtmlFormatter(nowrap=True) + houtfile = StringIO() + hfmt.format(tokensource, houtfile) + + nfmt = NullFormatter() + noutfile = StringIO() + nfmt.format(tokensource, noutfile) + + stripped_html = re.sub('<.*?>', '', houtfile.getvalue()) + escaped_text = escape_html(noutfile.getvalue()) + assert stripped_html == escaped_text + + +def test_external_css(): + # test correct behavior + # CSS should be in /tmp directory + fmt1 = HtmlFormatter(full=True, cssfile='fmt1.css', outencoding='utf-8') + # CSS should be in TESTDIR (TESTDIR is absolute) + fmt2 = HtmlFormatter(full=True, cssfile=path.join(TESTDIR, 'fmt2.css'), + outencoding='utf-8') + tfile = tempfile.NamedTemporaryFile(suffix='.html') + fmt1.format(tokensource, tfile) + try: + fmt2.format(tokensource, tfile) + assert path.isfile(path.join(TESTDIR, 'fmt2.css')) + except IOError: + # test directory not writable + pass + tfile.close() + + assert path.isfile(path.join(path.dirname(tfile.name), 'fmt1.css')) + os.unlink(path.join(path.dirname(tfile.name), 'fmt1.css')) + try: + os.unlink(path.join(TESTDIR, 'fmt2.css')) + except OSError: + pass + + +def test_all_options(): + def check(optdict): outfile = StringIO() fmt = HtmlFormatter(**optdict) fmt.format(tokensource, outfile) - 
html = outfile.getvalue()
-        self.assertTrue(re.search(r"<pre>\s+1\s+2\s+3", html))
 
-    def test_linenos_with_startnum(self):
-        optdict = dict(linenos=True, linenostart=5)
+    for optdict in [
+        dict(nowrap=True),
+        dict(linenos=True, full=True),
+        dict(linenos=True, linespans='L'),
+        dict(hl_lines=[1, 5, 10, 'xxx']),
+        dict(hl_lines=[1, 5, 10], noclasses=True),
+    ]:
+        check(optdict)
+
+    for linenos in [False, 'table', 'inline']:
+        for noclasses in [False, True]:
+            for linenospecial in [0, 5]:
+                for anchorlinenos in [False, True]:
+                    optdict = dict(
+                        linenos=linenos,
+                        noclasses=noclasses,
+                        linenospecial=linenospecial,
+                        anchorlinenos=anchorlinenos,
+                    )
+                    check(optdict)
+
+
+def test_linenos():
+    optdict = dict(linenos=True)
+    outfile = StringIO()
+    fmt = HtmlFormatter(**optdict)
+    fmt.format(tokensource, outfile)
+    html = outfile.getvalue()
+    assert re.search(r"
\s+1\s+2\s+3", html)
+
+
+def test_linenos_with_startnum():
+    optdict = dict(linenos=True, linenostart=5)
+    outfile = StringIO()
+    fmt = HtmlFormatter(**optdict)
+    fmt.format(tokensource, outfile)
+    html = outfile.getvalue()
+    assert re.search(r"
\s+5\s+6\s+7", html)
+
+
+def test_lineanchors():
+    optdict = dict(lineanchors="foo")
+    outfile = StringIO()
+    fmt = HtmlFormatter(**optdict)
+    fmt.format(tokensource, outfile)
+    html = outfile.getvalue()
+    assert re.search("
", html)
+
+
+def test_lineanchors_with_startnum():
+    optdict = dict(lineanchors="foo", linenostart=5)
+    outfile = StringIO()
+    fmt = HtmlFormatter(**optdict)
+    fmt.format(tokensource, outfile)
+    html = outfile.getvalue()
+    assert re.search("
", html)
+
+
+def test_valid_output():
+    # test all available wrappers
+    fmt = HtmlFormatter(full=True, linenos=True, noclasses=True,
+                        outencoding='utf-8')
+
+    handle, pathname = tempfile.mkstemp('.html')
+    with os.fdopen(handle, 'w+b') as tfile:
+        fmt.format(tokensource, tfile)
+    catname = os.path.join(TESTDIR, 'dtds', 'HTML4.soc')
+    try:
+        import subprocess
+        po = subprocess.Popen(['nsgmls', '-s', '-c', catname, pathname],
+                              stdout=subprocess.PIPE)
+        ret = po.wait()
+        output = po.stdout.read()
+        po.stdout.close()
+    except OSError:
+        # nsgmls not available
+        pass
+    else:
+        if ret:
+            print(output)
+        assert not ret, 'nsgmls run reported errors'
+
+    os.unlink(pathname)
+
+
+def test_get_style_defs():
+    fmt = HtmlFormatter()
+    sd = fmt.get_style_defs()
+    assert sd.startswith('.')
+
+    fmt = HtmlFormatter(cssclass='foo')
+    sd = fmt.get_style_defs()
+    assert sd.startswith('.foo')
+    sd = fmt.get_style_defs('.bar')
+    assert sd.startswith('.bar')
+    sd = fmt.get_style_defs(['.bar', '.baz'])
+    fl = sd.splitlines()[0]
+    assert '.bar' in fl and '.baz' in fl
+
+
+def test_unicode_options():
+    fmt = HtmlFormatter(title=u'Föö',
+                        cssclass=u'bär',
+                        cssstyles=u'div:before { content: \'bäz\' }',
+                        encoding='utf-8')
+    handle, pathname = tempfile.mkstemp('.html')
+    with os.fdopen(handle, 'w+b') as tfile:
+        fmt.format(tokensource, tfile)
+
+
+def test_ctags():
+    try:
+        import ctags
+    except ImportError:
+        # we can't check without the ctags module, but at least check the exception
+        assert raises(RuntimeError, HtmlFormatter, tagsfile='support/tags')
+    else:
+        # this tagfile says that test_ctags() is on line 165, even if it isn't
+        # anymore in the actual source
+        fmt = HtmlFormatter(tagsfile='support/tags', lineanchors='L',
+                            tagurlformat='%(fname)s%(fext)s')
         outfile = StringIO()
-        fmt = HtmlFormatter(**optdict)
         fmt.format(tokensource, outfile)
-        html = outfile.getvalue()
-        self.assertTrue(re.search(r"
\s+5\s+6\s+7", html))
+        assert 'test_ctags' in outfile.getvalue()
 
-    def test_lineanchors(self):
-        optdict = dict(lineanchors="foo")
-        outfile = StringIO()
-        fmt = HtmlFormatter(**optdict)
-        fmt.format(tokensource, outfile)
-        html = outfile.getvalue()
-        self.assertTrue(re.search("
", html))
 
-    def test_lineanchors_with_startnum(self):
-        optdict = dict(lineanchors="foo", linenostart=5)
-        outfile = StringIO()
-        fmt = HtmlFormatter(**optdict)
-        fmt.format(tokensource, outfile)
-        html = outfile.getvalue()
-        self.assertTrue(re.search("
", html))
-
-    def test_valid_output(self):
-        # test all available wrappers
-        fmt = HtmlFormatter(full=True, linenos=True, noclasses=True,
-                            outencoding='utf-8')
-
-        handle, pathname = tempfile.mkstemp('.html')
-        with os.fdopen(handle, 'w+b') as tfile:
-            fmt.format(tokensource, tfile)
-        catname = os.path.join(TESTDIR, 'dtds', 'HTML4.soc')
-        try:
-            import subprocess
-            po = subprocess.Popen(['nsgmls', '-s', '-c', catname, pathname],
-                                  stdout=subprocess.PIPE)
-            ret = po.wait()
-            output = po.stdout.read()
-            po.stdout.close()
-        except OSError:
-            # nsgmls not available
-            pass
-        else:
-            if ret:
-                print(output)
-            self.assertFalse(ret, 'nsgmls run reported errors')
-
-        os.unlink(pathname)
-
-    def test_get_style_defs(self):
-        fmt = HtmlFormatter()
-        sd = fmt.get_style_defs()
-        self.assertTrue(sd.startswith('.'))
-
-        fmt = HtmlFormatter(cssclass='foo')
-        sd = fmt.get_style_defs()
-        self.assertTrue(sd.startswith('.foo'))
-        sd = fmt.get_style_defs('.bar')
-        self.assertTrue(sd.startswith('.bar'))
-        sd = fmt.get_style_defs(['.bar', '.baz'])
-        fl = sd.splitlines()[0]
-        self.assertTrue('.bar' in fl and '.baz' in fl)
-
-    def test_unicode_options(self):
-        fmt = HtmlFormatter(title=u'Föö',
-                            cssclass=u'bär',
-                            cssstyles=u'div:before { content: \'bäz\' }',
-                            encoding='utf-8')
-        handle, pathname = tempfile.mkstemp('.html')
-        with os.fdopen(handle, 'w+b') as tfile:
-            fmt.format(tokensource, tfile)
-
-    def test_ctags(self):
-        try:
-            import ctags
-        except ImportError:
-            # we can't check without the ctags module, but at least check the exception
-            self.assertRaises(RuntimeError, HtmlFormatter, tagsfile='support/tags')
-        else:
-            # this tagfile says that test_ctags() is on line 165, even if it isn't
-            # anymore in the actual source
-            fmt = HtmlFormatter(tagsfile='support/tags', lineanchors='L',
-                                tagurlformat='%(fname)s%(fext)s')
-            outfile = StringIO()
-            fmt.format(tokensource, outfile)
-            self.assertTrue('test_ctags'
-                            in outfile.getvalue())
-
-    def test_filename(self):
-        optdict = dict(filename="test.py")
-        outfile = StringIO()
-        fmt = HtmlFormatter(**optdict)
-        fmt.format(tokensource, outfile)
-        html = outfile.getvalue()
-        self.assertTrue(re.search("test.py
", html))
+def test_filename():
+    optdict = dict(filename="test.py")
+    outfile = StringIO()
+    fmt = HtmlFormatter(**optdict)
+    fmt.format(tokensource, outfile)
+    html = outfile.getvalue()
+    assert re.search("test.py
", html)
diff --git a/tests/test_inherit.py b/tests/test_inherit.py
index 38acf32..0352772 100644
--- a/tests/test_inherit.py
+++ b/tests/test_inherit.py
@@ -7,37 +7,10 @@
     :license: BSD, see LICENSE for details.
 """
 
-import unittest
-
 from pygments.lexer import RegexLexer, inherit
 from pygments.token import Text
 
 
-class InheritTest(unittest.TestCase):
-    def test_single_inheritance_position(self):
-        t = Two()
-        pats = [x[0].__self__.pattern for x in t._tokens['root']]
-        self.assertEqual(['x', 'a', 'b', 'y'], pats)
-    def test_multi_inheritance_beginning(self):
-        t = Beginning()
-        pats = [x[0].__self__.pattern for x in t._tokens['root']]
-        self.assertEqual(['x', 'a', 'b', 'y', 'm'], pats)
-    def test_multi_inheritance_end(self):
-        t = End()
-        pats = [x[0].__self__.pattern for x in t._tokens['root']]
-        self.assertEqual(['m', 'x', 'a', 'b', 'y'], pats)
-
-    def test_multi_inheritance_position(self):
-        t = Three()
-        pats = [x[0].__self__.pattern for x in t._tokens['root']]
-        self.assertEqual(['i', 'x', 'a', 'b', 'y', 'j'], pats)
-
-    def test_single_inheritance_with_skip(self):
-        t = Skipped()
-        pats = [x[0].__self__.pattern for x in t._tokens['root']]
-        self.assertEqual(['x', 'a', 'b', 'y'], pats)
-
-
 class One(RegexLexer):
     tokens = {
         'root': [
@@ -46,6 +19,7 @@ class One(RegexLexer):
         ],
     }
 
+
 class Two(One):
     tokens = {
         'root': [
@@ -55,6 +29,7 @@ class Two(One):
         ],
     }
 
+
 class Three(Two):
     tokens = {
         'root': [
@@ -64,6 +39,7 @@ class Three(Two):
         ],
     }
 
+
 class Beginning(Two):
     tokens = {
         'root': [
@@ -72,6 +48,7 @@ class Beginning(Two):
         ],
     }
 
+
 class End(Two):
     tokens = {
         'root': [
@@ -80,9 +57,11 @@ class End(Two):
         ],
     }
 
+
 class Empty(One):
     tokens = {}
 
+
 class Skipped(Empty):
     tokens = {
         'root': [
@@ -92,3 +71,32 @@ class Skipped(Empty):
         ],
     }
 
+
+def test_single_inheritance_position():
+    t = Two()
+    pats = [x[0].__self__.pattern for x in t._tokens['root']]
+    assert ['x', 'a', 'b', 'y'] == pats
+
+
+def test_multi_inheritance_beginning():
+    t = Beginning()
+    pats = [x[0].__self__.pattern for x in t._tokens['root']]
+    assert ['x', 'a', 'b', 'y', 'm'] == pats
+
+
+def test_multi_inheritance_end():
+    t = End()
+    pats = [x[0].__self__.pattern for x in t._tokens['root']]
+    assert ['m', 'x', 'a', 'b', 'y'] == pats
+
+
+def test_multi_inheritance_position():
+    t = Three()
+    pats = [x[0].__self__.pattern for x in t._tokens['root']]
+    assert ['i', 'x', 'a', 'b', 'y', 'j'] == pats
+
+
+def test_single_inheritance_with_skip():
+    t = Skipped()
+    pats = [x[0].__self__.pattern for x in t._tokens['root']]
+    assert ['x', 'a', 'b', 'y'] == pats
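
For context on what these pattern-order assertions check: pygments'
``inherit`` marker splices the parent lexer's rules into the subclass's token
table at the position of the marker, which is why lists like
``['x', 'a', 'b', 'y']`` interleave the way they do. A minimal sketch under
that reading (the lexer classes here are hypothetical, not taken from the
patch):

    from pygments.lexer import RegexLexer, inherit
    from pygments.token import Keyword, Text

    class BaseLexer(RegexLexer):
        tokens = {
            'root': [
                (r'\bfoo\b', Keyword),
                (r'\s+', Text),
            ],
        }

    class DerivedLexer(BaseLexer):
        tokens = {
            'root': [
                (r'\bbar\b', Keyword),
                inherit,  # BaseLexer's 'root' rules are spliced in here
            ],
        }

``DerivedLexer`` thus tries ``bar`` first and then falls through to the
inherited rules, mirroring the ordering asserted by the tests above.
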
diff --git a/tests/test_irc_formatter.py b/tests/test_irc_formatter.py
index 18bcd58..046a0d1 100644
--- a/tests/test_irc_formatter.py
+++ b/tests/test_irc_formatter.py
@@ -9,22 +9,16 @@
 
 from __future__ import print_function
 
-import re
-import unittest
-
 from pygments.util import StringIO
 from pygments.lexers import PythonLexer
 from pygments.formatters import IRCFormatter
 
-import support
-
 tokensource = list(PythonLexer().get_tokens("lambda x: 123"))
 
-class IRCFormatterTest(unittest.TestCase):
-    def test_correct_output(self):
-        hfmt = IRCFormatter()
-        houtfile = StringIO()
-        hfmt.format(tokensource, houtfile)
 
-        self.assertEqual(u'\x0302lambda\x03 x: \x0302123\x03\n', houtfile.getvalue())
+def test_correct_output():
+    hfmt = IRCFormatter()
+    houtfile = StringIO()
+    hfmt.format(tokensource, houtfile)
 
+    assert u'\x0302lambda\x03 x: \x0302123\x03\n' == houtfile.getvalue()
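
For readers decoding the expected string above: ``\x03`` is the mIRC color
control byte, followed by a two-digit color number, so ``\x0302lambda\x03``
is ``lambda`` wrapped in IRC color 2. A quick, illustrative way to inspect
the raw escapes the formatter emits:

    from pygments.formatters import IRCFormatter
    from pygments.lexers import PythonLexer
    from pygments.util import StringIO

    out = StringIO()
    IRCFormatter().format(PythonLexer().get_tokens('lambda x: 123'), out)
    print(repr(out.getvalue()))  # shows the \x03 color escapes verbatim
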
diff --git a/tests/test_java.py b/tests/test_java.py
index 5f52085..9446324 100644
--- a/tests/test_java.py
+++ b/tests/test_java.py
@@ -7,72 +7,72 @@
     :license: BSD, see LICENSE for details.
 """
 
-import unittest
+import pytest
 
-from pygments.token import Text, Name, Operator, Keyword, Number
+from pygments.token import Text, Name, Punctuation, Keyword, Number
 from pygments.lexers import JavaLexer
 
 
-class JavaTest(unittest.TestCase):
+@pytest.fixture(scope='module')
+def lexer():
+    yield JavaLexer()
 
-    def setUp(self):
-        self.lexer = JavaLexer()
-        self.maxDiff = None
 
-    def testEnhancedFor(self):
-        fragment = u'label:\nfor(String var2: var1) {}\n'
-        tokens = [
-            (Name.Label, u'label:'),
-            (Text, u'\n'),
-            (Keyword, u'for'),
-            (Operator, u'('),
-            (Name, u'String'),
-            (Text, u' '),
-            (Name, u'var2'),
-            (Operator, u':'),
-            (Text, u' '),
-            (Name, u'var1'),
-            (Operator, u')'),
-            (Text, u' '),
-            (Operator, u'{'),
-            (Operator, u'}'),
-            (Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+def test_enhanced_for(lexer):
+    fragment = u'label:\nfor(String var2: var1) {}\n'
+    tokens = [
+        (Name.Label, u'label:'),
+        (Text, u'\n'),
+        (Keyword, u'for'),
+        (Punctuation, u'('),
+        (Name, u'String'),
+        (Text, u' '),
+        (Name, u'var2'),
+        (Punctuation, u':'),
+        (Text, u' '),
+        (Name, u'var1'),
+        (Punctuation, u')'),
+        (Text, u' '),
+        (Punctuation, u'{'),
+        (Punctuation, u'}'),
+        (Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
 
-    def testNumericLiterals(self):
-        fragment = '0 5L 9__542_72l 0xbEEf 0X9_A 0_35 01 0b0___101_0'
-        fragment += ' 0. .7_17F 3e-1_3d 1f 6_01.9e+3 0x.1Fp3 0XEP8D\n'
-        tokens = [
-            (Number.Integer, '0'),
-            (Text, ' '),
-            (Number.Integer, '5L'),
-            (Text, ' '),
-            (Number.Integer, '9__542_72l'),
-            (Text, ' '),
-            (Number.Hex, '0xbEEf'),
-            (Text, ' '),
-            (Number.Hex, '0X9_A'),
-            (Text, ' '),
-            (Number.Oct, '0_35'),
-            (Text, ' '),
-            (Number.Oct, '01'),
-            (Text, ' '),
-            (Number.Bin, '0b0___101_0'),
-            (Text, ' '),
-            (Number.Float, '0.'),
-            (Text, ' '),
-            (Number.Float, '.7_17F'),
-            (Text, ' '),
-            (Number.Float, '3e-1_3d'),
-            (Text, ' '),
-            (Number.Float, '1f'),
-            (Text, ' '),
-            (Number.Float, '6_01.9e+3'),
-            (Text, ' '),
-            (Number.Float, '0x.1Fp3'),
-            (Text, ' '),
-            (Number.Float, '0XEP8D'),
-            (Text, '\n')
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+
+def test_numeric_literals(lexer):
+    fragment = '0 5L 9__542_72l 0xbEEf 0X9_A 0_35 01 0b0___101_0'
+    fragment += ' 0. .7_17F 3e-1_3d 1f 6_01.9e+3 0x.1Fp3 0XEP8D\n'
+    tokens = [
+        (Number.Integer, '0'),
+        (Text, ' '),
+        (Number.Integer, '5L'),
+        (Text, ' '),
+        (Number.Integer, '9__542_72l'),
+        (Text, ' '),
+        (Number.Hex, '0xbEEf'),
+        (Text, ' '),
+        (Number.Hex, '0X9_A'),
+        (Text, ' '),
+        (Number.Oct, '0_35'),
+        (Text, ' '),
+        (Number.Oct, '01'),
+        (Text, ' '),
+        (Number.Bin, '0b0___101_0'),
+        (Text, ' '),
+        (Number.Float, '0.'),
+        (Text, ' '),
+        (Number.Float, '.7_17F'),
+        (Text, ' '),
+        (Number.Float, '3e-1_3d'),
+        (Text, ' '),
+        (Number.Float, '1f'),
+        (Text, ' '),
+        (Number.Float, '6_01.9e+3'),
+        (Text, ' '),
+        (Number.Float, '0x.1Fp3'),
+        (Text, ' '),
+        (Number.Float, '0XEP8D'),
+        (Text, '\n')
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
diff --git a/tests/test_javascript.py b/tests/test_javascript.py
index a2dfb7e..25e06fd 100644
--- a/tests/test_javascript.py
+++ b/tests/test_javascript.py
@@ -7,7 +7,7 @@
     :license: BSD, see LICENSE for details.
 """
 
-import unittest
+import pytest
 
 from pygments.lexers import CoffeeScriptLexer
 from pygments.token import Token
@@ -36,49 +36,50 @@ COFFEE_SLASH_GOLDEN = [
     ('a = 1 + /d/.test(a)', True),
 ]
 
-def test_coffee_slashes():
-    for input_str, slashes_are_regex_here in COFFEE_SLASH_GOLDEN:
-        yield coffee_runner, input_str, slashes_are_regex_here
 
-def coffee_runner(input_str, slashes_are_regex_here):
-    lex = CoffeeScriptLexer()
-    output = list(lex.get_tokens(input_str))
+@pytest.fixture(scope='module')
+def lexer():
+    yield CoffeeScriptLexer()
+
+
+@pytest.mark.parametrize('golden', COFFEE_SLASH_GOLDEN)
+def test_coffee_slashes(lexer, golden):
+    input_str, slashes_are_regex_here = golden
+    output = list(lexer.get_tokens(input_str))
     print(output)
     for t, s in output:
         if '/' in s:
             is_regex = t is Token.String.Regex
             assert is_regex == slashes_are_regex_here, (t, s)
 
-class CoffeeTest(unittest.TestCase):
-    def setUp(self):
-        self.lexer = CoffeeScriptLexer()
 
-    def testMixedSlashes(self):
-        fragment = u'a?/foo/:1/2;\n'
-        tokens = [
-            (Token.Name.Other, u'a'),
-            (Token.Operator, u'?'),
-            (Token.Literal.String.Regex, u'/foo/'),
-            (Token.Operator, u':'),
-            (Token.Literal.Number.Integer, u'1'),
-            (Token.Operator, u'/'),
-            (Token.Literal.Number.Integer, u'2'),
-            (Token.Punctuation, u';'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+def test_mixed_slashes(lexer):
+    fragment = u'a?/foo/:1/2;\n'
+    tokens = [
+        (Token.Name.Other, u'a'),
+        (Token.Operator, u'?'),
+        (Token.Literal.String.Regex, u'/foo/'),
+        (Token.Operator, u':'),
+        (Token.Literal.Number.Integer, u'1'),
+        (Token.Operator, u'/'),
+        (Token.Literal.Number.Integer, u'2'),
+        (Token.Punctuation, u';'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
 
-    def testBewareInfiniteLoop(self):
-        # This demonstrates the case that "This isn't really guarding" comment
-        # refers to.
-        fragment = '/a/x;\n'
-        tokens = [
-            (Token.Text, ''),
-            (Token.Operator, '/'),
-            (Token.Name.Other, 'a'),
-            (Token.Operator, '/'),
-            (Token.Name.Other, 'x'),
-            (Token.Punctuation, ';'),
-            (Token.Text, '\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+def test_beware_infinite_loop(lexer):
+    # This demonstrates the case that the "This isn't really guarding"
+    # comment refers to.
+    fragment = '/a/x;\n'
+    tokens = [
+        (Token.Text, ''),
+        (Token.Operator, '/'),
+        (Token.Name.Other, 'a'),
+        (Token.Operator, '/'),
+        (Token.Name.Other, 'x'),
+        (Token.Punctuation, ';'),
+        (Token.Text, '\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
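
The hunk above replaces a nose-style yield runner with
@pytest.mark.parametrize, which turns each golden pair into an independently
collected and reported test case. A minimal standalone sketch of the pattern
(double() and its golden pairs are illustrative, not part of the patch):

    import pytest

    GOLDEN = [(1, 2), (3, 6), (0, 0)]  # (input, expected), like COFFEE_SLASH_GOLDEN

    def double(n):
        return 2 * n

    @pytest.mark.parametrize('golden', GOLDEN)
    def test_double(golden):
        value, expected = golden  # each pair passes or fails on its own
        assert double(value) == expected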
diff --git a/tests/test_julia.py b/tests/test_julia.py
index eda04b1..3f11593 100644
--- a/tests/test_julia.py
+++ b/tests/test_julia.py
@@ -7,52 +7,53 @@
     :license: BSD, see LICENSE for details.
 """
 
-import unittest
+import pytest
 
-from pygments.lexers import JuliaLexer
 from pygments.token import Token
+from pygments.lexers import JuliaLexer
+
 
+@pytest.fixture(scope='module')
+def lexer():
+    yield JuliaLexer()
 
-class JuliaTests(unittest.TestCase):
-    def setUp(self):
-        self.lexer = JuliaLexer()
 
-    def test_unicode(self):
-        """
-        Test that unicode character, √, in an expression is recognized
-        """
-        fragment = u's = \u221a((1/n) * sum(count .^ 2) - mu .^2)\n'
-        tokens = [
-            (Token.Name, u's'),
-            (Token.Text, u' '),
-            (Token.Operator, u'='),
-            (Token.Text, u' '),
-            (Token.Operator, u'\u221a'),
-            (Token.Punctuation, u'('),
-            (Token.Punctuation, u'('),
-            (Token.Literal.Number.Integer, u'1'),
-            (Token.Operator, u'/'),
-            (Token.Name, u'n'),
-            (Token.Punctuation, u')'),
-            (Token.Text, u' '),
-            (Token.Operator, u'*'),
-            (Token.Text, u' '),
-            (Token.Name, u'sum'),
-            (Token.Punctuation, u'('),
-            (Token.Name, u'count'),
-            (Token.Text, u' '),
-            (Token.Operator, u'.^'),
-            (Token.Text, u' '),
-            (Token.Literal.Number.Integer, u'2'),
-            (Token.Punctuation, u')'),
-            (Token.Text, u' '),
-            (Token.Operator, u'-'),
-            (Token.Text, u' '),
-            (Token.Name, u'mu'),
-            (Token.Text, u' '),
-            (Token.Operator, u'.^'),
-            (Token.Literal.Number.Integer, u'2'),
-            (Token.Punctuation, u')'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+def test_unicode(lexer):
+    """
+    Test that the unicode character √ in an expression is recognized.
+    """
+    fragment = u's = \u221a((1/n) * sum(count .^ 2) - mu .^2)\n'
+    tokens = [
+        (Token.Name, u's'),
+        (Token.Text, u' '),
+        (Token.Operator, u'='),
+        (Token.Text, u' '),
+        (Token.Operator, u'\u221a'),
+        (Token.Punctuation, u'('),
+        (Token.Punctuation, u'('),
+        (Token.Literal.Number.Integer, u'1'),
+        (Token.Operator, u'/'),
+        (Token.Name, u'n'),
+        (Token.Punctuation, u')'),
+        (Token.Text, u' '),
+        (Token.Operator, u'*'),
+        (Token.Text, u' '),
+        (Token.Name, u'sum'),
+        (Token.Punctuation, u'('),
+        (Token.Name, u'count'),
+        (Token.Text, u' '),
+        (Token.Operator, u'.^'),
+        (Token.Text, u' '),
+        (Token.Literal.Number.Integer, u'2'),
+        (Token.Punctuation, u')'),
+        (Token.Text, u' '),
+        (Token.Operator, u'-'),
+        (Token.Text, u' '),
+        (Token.Name, u'mu'),
+        (Token.Text, u' '),
+        (Token.Operator, u'.^'),
+        (Token.Literal.Number.Integer, u'2'),
+        (Token.Punctuation, u')'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
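
As in the other converted files, unittest's setUp becomes a module-scoped
pytest fixture: the lexer is constructed once per module and injected into any
test that names it as a parameter, rather than rebuilt before every test. A
minimal sketch of the mechanism, reusing the real JuliaLexer:

    import pytest
    from pygments.lexers import JuliaLexer

    @pytest.fixture(scope='module')
    def lexer():
        yield JuliaLexer()  # built once, shared by all tests in the module

    def test_integer(lexer):
        # get_tokens() yields (tokentype, value) pairs and ensures a final '\n'.
        assert list(lexer.get_tokens('1'))[0][1] == '1'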
diff --git a/tests/test_kotlin.py b/tests/test_kotlin.py
index 417d0d9..69b19c1 100644
--- a/tests/test_kotlin.py
+++ b/tests/test_kotlin.py
@@ -7,125 +7,127 @@
     :license: BSD, see LICENSE for details.
 """
 
-import unittest
+import pytest
 
-from pygments.token import Text, Name, Operator, Keyword, Number, Punctuation, String
+from pygments.token import Text, Name, Keyword, Punctuation, String
 from pygments.lexers import KotlinLexer
 
-class KotlinTest(unittest.TestCase):
-
-    def setUp(self):
-        self.lexer = KotlinLexer()
-        self.maxDiff = None
-    
-    def testCanCopeWithBackTickNamesInFunctions(self):
-        fragment = u'fun `wo bble`'
-        tokens = [
-            (Keyword, u'fun'),
-            (Text, u' '),
-            (Name.Function, u'`wo bble`'),
-            (Text, u'\n')
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def testCanCopeWithCommasAndDashesInBackTickNames(self):
-        fragment = u'fun `wo,-bble`'
-        tokens = [
-            (Keyword, u'fun'),
-            (Text, u' '),
-            (Name.Function, u'`wo,-bble`'),
-            (Text, u'\n')
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-    
-    def testCanCopeWithDestructuring(self):
-        fragment = u'val (a, b) = '
-        tokens = [
-            (Keyword, u'val'),
-            (Text, u' '),
-            (Punctuation, u'('),
-            (Name.Property, u'a'),
-            (Punctuation, u','),
-            (Text, u' '),
-            (Name.Property, u'b'),
-            (Punctuation, u')'),
-            (Text, u' '),
-            (Punctuation, u'='),
-            (Text, u' '),
-            (Text, u'\n')
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-    
-    def testCanCopeGenericsInDestructuring(self):
-        fragment = u'val (a: List<Something>, b: Set<Wobble>) ='
-        tokens = [
-            (Keyword, u'val'),
-            (Text, u' '),
-            (Punctuation, u'('),
-            (Name.Property, u'a'),
-            (Punctuation, u':'),
-            (Text, u' '),
-            (Name.Property, u'List'),
-            (Punctuation, u'<'),
-            (Name, u'Something'),
-            (Punctuation, u'>'),
-            (Punctuation, u','),
-            (Text, u' '),
-            (Name.Property, u'b'),
-            (Punctuation, u':'),
-            (Text, u' '),
-            (Name.Property, u'Set'),
-            (Punctuation, u'<'),
-            (Name, u'Wobble'),
-            (Punctuation, u'>'),
-            (Punctuation, u')'),
-            (Text, u' '),
-            (Punctuation, u'='),
-            (Text, u'\n')
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def testCanCopeWithGenerics(self):
-        fragment = u'inline fun <reified T : ContractState> VaultService.queryBy(): Vault.Page<T> {'
-        tokens = [
-            (Keyword, u'inline fun'),
-            (Text, u' '),
-            (Punctuation, u'<'),
-            (Keyword, u'reified'),
-            (Text, u' '),
-            (Name, u'T'),
-            (Text, u' '),
-            (Punctuation, u':'),
-            (Text, u' '),
-            (Name, u'ContractState'),
-            (Punctuation, u'>'),
-            (Text, u' '),
-            (Name.Class, u'VaultService'),
-            (Punctuation, u'.'),
-            (Name.Function, u'queryBy'),
-            (Punctuation, u'('),
-            (Punctuation, u')'),
-            (Punctuation, u':'),
-            (Text, u' '),
-            (Name, u'Vault'),
-            (Punctuation, u'.'),
-            (Name, u'Page'),
-            (Punctuation, u'<'),
-            (Name, u'T'),
-            (Punctuation, u'>'),
-            (Text, u' '),
-            (Punctuation, u'{'),
-            (Text, u'\n')
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def testShouldCopeWithMultilineComments(self):
-        fragment = u'"""\nthis\nis\na\ncomment"""'
-        tokens = [
-            (String, u'"""\nthis\nis\na\ncomment"""'), 
-            (Text, u'\n')
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-if __name__ == '__main__':
-    unittest.main()
+
+@pytest.fixture(scope='module')
+def lexer():
+    yield KotlinLexer()
+
+
+def test_can_cope_with_backtick_names_in_functions(lexer):
+    fragment = u'fun `wo bble`'
+    tokens = [
+        (Keyword, u'fun'),
+        (Text, u' '),
+        (Name.Function, u'`wo bble`'),
+        (Text, u'\n')
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_can_cope_with_commas_and_dashes_in_backtick_names(lexer):
+    fragment = u'fun `wo,-bble`'
+    tokens = [
+        (Keyword, u'fun'),
+        (Text, u' '),
+        (Name.Function, u'`wo,-bble`'),
+        (Text, u'\n')
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_can_cope_with_destructuring(lexer):
+    fragment = u'val (a, b) = '
+    tokens = [
+        (Keyword, u'val'),
+        (Text, u' '),
+        (Punctuation, u'('),
+        (Name.Property, u'a'),
+        (Punctuation, u','),
+        (Text, u' '),
+        (Name.Property, u'b'),
+        (Punctuation, u')'),
+        (Text, u' '),
+        (Punctuation, u'='),
+        (Text, u' '),
+        (Text, u'\n')
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_can_cope_generics_in_destructuring(lexer):
+    fragment = u'val (a: List<Something>, b: Set<Wobble>) ='
+    tokens = [
+        (Keyword, u'val'),
+        (Text, u' '),
+        (Punctuation, u'('),
+        (Name.Property, u'a'),
+        (Punctuation, u':'),
+        (Text, u' '),
+        (Name.Property, u'List'),
+        (Punctuation, u'<'),
+        (Name, u'Something'),
+        (Punctuation, u'>'),
+        (Punctuation, u','),
+        (Text, u' '),
+        (Name.Property, u'b'),
+        (Punctuation, u':'),
+        (Text, u' '),
+        (Name.Property, u'Set'),
+        (Punctuation, u'<'),
+        (Name, u'Wobble'),
+        (Punctuation, u'>'),
+        (Punctuation, u')'),
+        (Text, u' '),
+        (Punctuation, u'='),
+        (Text, u'\n')
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_can_cope_with_generics(lexer):
+    fragment = u'inline fun <reified T : ContractState> VaultService.queryBy(): Vault.Page<T> {'
+    tokens = [
+        (Keyword, u'inline fun'),
+        (Text, u' '),
+        (Punctuation, u'<'),
+        (Keyword, u'reified'),
+        (Text, u' '),
+        (Name, u'T'),
+        (Text, u' '),
+        (Punctuation, u':'),
+        (Text, u' '),
+        (Name, u'ContractState'),
+        (Punctuation, u'>'),
+        (Text, u' '),
+        (Name.Class, u'VaultService'),
+        (Punctuation, u'.'),
+        (Name.Function, u'queryBy'),
+        (Punctuation, u'('),
+        (Punctuation, u')'),
+        (Punctuation, u':'),
+        (Text, u' '),
+        (Name, u'Vault'),
+        (Punctuation, u'.'),
+        (Name, u'Page'),
+        (Punctuation, u'<'),
+        (Name, u'T'),
+        (Punctuation, u'>'),
+        (Text, u' '),
+        (Punctuation, u'{'),
+        (Text, u'\n')
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_should_cope_with_multiline_comments(lexer):
+    fragment = u'"""\nthis\nis\na\ncomment"""'
+    tokens = [
+        (String, u'"""\nthis\nis\na\ncomment"""'),
+        (Text, u'\n')
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
diff --git a/tests/test_latex_formatter.py b/tests/test_latex_formatter.py
index aa4ac3b..7ab0d7d 100644
--- a/tests/test_latex_formatter.py
+++ b/tests/test_latex_formatter.py
@@ -10,45 +10,44 @@
 from __future__ import print_function
 
 import os
-import unittest
 import tempfile
+from os import path
+
+import pytest
 
 from pygments.formatters import LatexFormatter
 from pygments.lexers import PythonLexer
 
-import support
-
-TESTFILE, TESTDIR = support.location(__file__)
-
-
-class LatexFormatterTest(unittest.TestCase):
-
-    def test_valid_output(self):
-        with open(TESTFILE) as fp:
-            tokensource = list(PythonLexer().get_tokens(fp.read()))
-        fmt = LatexFormatter(full=True, encoding='latin1')
-
-        handle, pathname = tempfile.mkstemp('.tex')
-        # place all output files in /tmp too
-        old_wd = os.getcwd()
-        os.chdir(os.path.dirname(pathname))
-        tfile = os.fdopen(handle, 'wb')
-        fmt.format(tokensource, tfile)
-        tfile.close()
-        try:
-            import subprocess
-            po = subprocess.Popen(['latex', '-interaction=nonstopmode',
-                                   pathname], stdout=subprocess.PIPE)
-            ret = po.wait()
-            output = po.stdout.read()
-            po.stdout.close()
-        except OSError as e:
-            # latex not available
-            raise support.SkipTest(e)
-        else:
-            if ret:
-                print(output)
-            self.assertFalse(ret, 'latex run reported errors')
-
-        os.unlink(pathname)
-        os.chdir(old_wd)
+TESTDIR = path.dirname(path.abspath(__file__))
+TESTFILE = path.join(TESTDIR, 'test_latex_formatter.py')
+
+
+def test_valid_output():
+    with open(TESTFILE) as fp:
+        tokensource = list(PythonLexer().get_tokens(fp.read()))
+    fmt = LatexFormatter(full=True, encoding='latin1')
+
+    handle, pathname = tempfile.mkstemp('.tex')
+    # place all output files in /tmp too
+    old_wd = os.getcwd()
+    os.chdir(os.path.dirname(pathname))
+    tfile = os.fdopen(handle, 'wb')
+    fmt.format(tokensource, tfile)
+    tfile.close()
+    try:
+        import subprocess
+        po = subprocess.Popen(['latex', '-interaction=nonstopmode',
+                               pathname], stdout=subprocess.PIPE)
+        ret = po.wait()
+        output = po.stdout.read()
+        po.stdout.close()
+    except OSError as e:
+        # latex not available
+        pytest.skip(str(e))
+    else:
+        if ret:
+            print(output)
+        assert not ret, 'latex run reported errors'
+
+    os.unlink(pathname)
+    os.chdir(old_wd)
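
The rewrite above also drops the tests/support helper: pytest.skip() may be
called anywhere in a test body, so the missing-latex case no longer needs a
custom SkipTest exception. The same guard in isolation (the binary name is
illustrative):

    import subprocess

    import pytest

    def test_needs_external_binary():
        try:
            subprocess.Popen(['some-external-tool', '--version'],
                             stdout=subprocess.PIPE)
        except OSError as e:
            pytest.skip(str(e))  # reported as skipped, not failed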
diff --git a/tests/test_lexers_other.py b/tests/test_lexers_other.py
index 8d53c54..3e8d3fc 100644
--- a/tests/test_lexers_other.py
+++ b/tests/test_lexers_other.py
@@ -6,75 +6,65 @@
     :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
+
 import glob
 import os
-import unittest
+
+import pytest
 
 from pygments.lexers import guess_lexer
 from pygments.lexers.scripting import EasytrieveLexer, JclLexer, RexxLexer
 
 
-def _exampleFilePath(filename):
+def _example_file_path(filename):
     return os.path.join(os.path.dirname(__file__), 'examplefiles', filename)
 
 
-class AnalyseTextTest(unittest.TestCase):
-    def _testCanRecognizeAndGuessExampleFiles(self, lexer):
-        assert lexer is not None
-
-        for pattern in lexer.filenames:
-            exampleFilesPattern = _exampleFilePath(pattern)
-            for exampleFilePath in glob.glob(exampleFilesPattern):
-                with open(exampleFilePath, 'rb') as fp:
-                    text = fp.read().decode('utf-8')
-                probability = lexer.analyse_text(text)
-                self.assertTrue(probability > 0,
-                                '%s must recognize %r' % (
-                                    lexer.name, exampleFilePath))
-                guessedLexer = guess_lexer(text)
-                self.assertEqual(guessedLexer.name, lexer.name)
-
-    def testCanRecognizeAndGuessExampleFiles(self):
-        LEXERS_TO_TEST = [
-            EasytrieveLexer,
-            JclLexer,
-            RexxLexer,
-        ]
-        for lexerToTest in LEXERS_TO_TEST:
-            self._testCanRecognizeAndGuessExampleFiles(lexerToTest)
+@pytest.mark.parametrize('lexer', [
+    EasytrieveLexer,
+    JclLexer,
+    RexxLexer,
+])
+def test_can_recognize_and_guess_example_files(lexer):
+    for pattern in lexer.filenames:
+        exampleFilesPattern = _example_file_path(pattern)
+        for exampleFilePath in glob.glob(exampleFilesPattern):
+            with open(exampleFilePath, 'rb') as fp:
+                text = fp.read().decode('utf-8')
+            probability = lexer.analyse_text(text)
+            assert probability > 0, '%s must recognize %r' % (
+                lexer.name, exampleFilePath)
+            guessedLexer = guess_lexer(text)
+            assert guessedLexer.name == lexer.name
 
 
-class EasyTrieveLexerTest(unittest.TestCase):
-    def testCanGuessFromText(self):
-        self.assertTrue(EasytrieveLexer.analyse_text('MACRO'))
-        self.assertTrue(EasytrieveLexer.analyse_text('\nMACRO'))
-        self.assertTrue(EasytrieveLexer.analyse_text(' \nMACRO'))
-        self.assertTrue(EasytrieveLexer.analyse_text(' \n MACRO'))
-        self.assertTrue(EasytrieveLexer.analyse_text('*\nMACRO'))
-        self.assertTrue(EasytrieveLexer.analyse_text(
-            '*\n *\n\n \n*\n MACRO'))
+def test_easytrieve_can_guess_from_text():
+    assert EasytrieveLexer.analyse_text('MACRO')
+    assert EasytrieveLexer.analyse_text('\nMACRO')
+    assert EasytrieveLexer.analyse_text(' \nMACRO')
+    assert EasytrieveLexer.analyse_text(' \n MACRO')
+    assert EasytrieveLexer.analyse_text('*\nMACRO')
+    assert EasytrieveLexer.analyse_text('*\n *\n\n \n*\n MACRO')
 
 
-class RexxLexerTest(unittest.TestCase):
-    def testCanGuessFromText(self):
-        self.assertAlmostEqual(0.01, RexxLexer.analyse_text('/* */'))
-        self.assertAlmostEqual(1.0,
-                               RexxLexer.analyse_text('''/* Rexx */
-                say "hello world"'''))
-        val = RexxLexer.analyse_text('/* */\n'
-                                     'hello:pRoceduRe\n'
-                                     '  say "hello world"')
-        self.assertTrue(val > 0.5, val)
-        val = RexxLexer.analyse_text('''/* */
-                if 1 > 0 then do
-                    say "ok"
-                end
-                else do
-                    say "huh?"
-                end''')
-        self.assertTrue(val > 0.2, val)
-        val = RexxLexer.analyse_text('''/* */
-                greeting = "hello world!"
-                parse value greeting "hello" name "!"
-                say name''')
-        self.assertTrue(val > 0.2, val)
+def test_rexx_can_guess_from_text():
+    assert RexxLexer.analyse_text('/* */') == pytest.approx(0.01)
+    assert RexxLexer.analyse_text('''/* Rexx */
+            say "hello world"''') == pytest.approx(1.0)
+    val = RexxLexer.analyse_text('/* */\n'
+                                 'hello:pRoceduRe\n'
+                                 '  say "hello world"')
+    assert val > 0.5
+    val = RexxLexer.analyse_text('''/* */
+            if 1 > 0 then do
+                say "ok"
+            end
+            else do
+                say "huh?"
+            end''')
+    assert val > 0.2
+    val = RexxLexer.analyse_text('''/* */
+            greeting = "hello world!"
+            parse value greeting "hello" name "!"
+            say name''')
+    assert val > 0.2
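
pytest.approx above stands in for assertAlmostEqual: it wraps the expected
value so that == compares within a tolerance (relative, 1e-6 by default). A
two-assertion sketch:

    import pytest

    assert 0.1 + 0.2 == pytest.approx(0.3)        # passes despite float rounding
    assert 1.0 == pytest.approx(1.001, rel=1e-2)  # explicitly looser tolerance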
diff --git a/tests/test_markdown_lexer.py b/tests/test_markdown_lexer.py
index c143586..9024bf0 100644
--- a/tests/test_markdown_lexer.py
+++ b/tests/test_markdown_lexer.py
@@ -1,31 +1,36 @@
 # -*- coding: utf-8 -*-
 """
-    Pygments regex lexer tests
-    ~~~~~~~~~~~~~~~~~~~~~~~~~~
+    Pygments Markdown lexer tests
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
     :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
-import unittest
+
+import pytest
 
 from pygments.lexers.markup import MarkdownLexer
 
 
-class SameTextTests(unittest.TestCase):
+@pytest.fixture(scope='module')
+def lexer():
+    yield MarkdownLexer()
+
+
+def assert_same_text(lexer, text):
+    """Show that lexed markdown does not remove any content. """
+    tokens = list(lexer.get_tokens_unprocessed(text))
+    output = ''.join(t[2] for t in tokens)
+    assert text == output
+
 
-    lexer = MarkdownLexer()
+def test_code_fence(lexer):
+    assert_same_text(lexer, r'```\nfoo\n```\n')
 
-    def assert_same_text(self, text):
-        """Show that lexed markdown does not remove any content. """
-        tokens = list(self.lexer.get_tokens_unprocessed(text))
-        output = ''.join(t[2] for t in tokens)
-        self.assertEqual(text, output)
 
-    def test_code_fence(self):
-        self.assert_same_text(r'```\nfoo\n```\n')
+def test_code_fence_gsm(lexer):
+    assert_same_text(lexer, r'```markdown\nfoo\n```\n')
 
-    def test_code_fence_gsm(self):
-        self.assert_same_text(r'```markdown\nfoo\n```\n')
 
-    def test_code_fence_gsm_with_no_lexer(self):
-        self.assert_same_text(r'```invalid-lexer\nfoo\n```\n')
+def test_code_fence_gsm_with_no_lexer(lexer):
+    assert_same_text(lexer, r'```invalid-lexer\nfoo\n```\n')
diff --git a/tests/test_modeline.py b/tests/test_modeline.py
index 6e1f16a..b120694 100644
--- a/tests/test_modeline.py
+++ b/tests/test_modeline.py
@@ -12,10 +12,7 @@ from __future__ import print_function
 from pygments import modeline
 
 
-def test_lexer_classes():
-    def verify(buf):
-        assert modeline.get_filetype_from_buffer(buf) == 'python'
-
+def test_modelines():
     for buf in [
             'vi: ft=python' + '\n' * 8,
             'vi: ft=python' + '\n' * 8,
@@ -23,4 +20,4 @@ def test_lexer_classes():
             '\n' * 8 + 'ex: filetype=python',
             '\n' * 8 + 'vim: some,other,syn=python\n\n\n\n'
     ]:
-        yield verify, buf
+        assert modeline.get_filetype_from_buffer(buf) == 'python'
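
Yield-style tests were removed from modern pytest, so the per-buffer checks
collapse into a plain loop. The trade-off is that the first failing buffer now
aborts the remaining ones; a parametrized variant (a sketch, not what the
patch does) would keep each buffer independent:

    import pytest

    from pygments import modeline

    @pytest.mark.parametrize('buf', [
        'vi: ft=python' + '\n' * 8,
        '\n' * 8 + 'ex: filetype=python',
    ])
    def test_modeline(buf):
        assert modeline.get_filetype_from_buffer(buf) == 'python'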
diff --git a/tests/test_objectiveclexer.py b/tests/test_objectiveclexer.py
index 3db6a9e..54f31db 100644
--- a/tests/test_objectiveclexer.py
+++ b/tests/test_objectiveclexer.py
@@ -7,86 +7,90 @@
     :license: BSD, see LICENSE for details.
 """
 
-import unittest
-import os
+import pytest
 
 from pygments.token import Token
 from pygments.lexers import ObjectiveCLexer
 
 
-class ObjectiveCLexerTest(unittest.TestCase):
-
-    def setUp(self):
-        self.lexer = ObjectiveCLexer()
-
-    def testLiteralNumberInt(self):
-        fragment = u'@(1);\n'
-        expected = [
-            (Token.Literal, u'@('),
-            (Token.Literal.Number.Integer, u'1'),
-            (Token.Literal, u')'),
-            (Token.Punctuation, u';'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(expected, list(self.lexer.get_tokens(fragment)))
-
-    def testLiteralNumberExpression(self):
-        fragment = u'@(1+2);\n'
-        expected = [
-            (Token.Literal, u'@('),
-            (Token.Literal.Number.Integer, u'1'),
-            (Token.Operator, u'+'),
-            (Token.Literal.Number.Integer, u'2'),
-            (Token.Literal, u')'),
-            (Token.Punctuation, u';'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(expected, list(self.lexer.get_tokens(fragment)))
-
-    def testLiteralNumberNestedExpression(self):
-        fragment = u'@(1+(2+3));\n'
-        expected = [
-            (Token.Literal, u'@('),
-            (Token.Literal.Number.Integer, u'1'),
-            (Token.Operator, u'+'),
-            (Token.Punctuation, u'('),
-            (Token.Literal.Number.Integer, u'2'),
-            (Token.Operator, u'+'),
-            (Token.Literal.Number.Integer, u'3'),
-            (Token.Punctuation, u')'),
-            (Token.Literal, u')'),
-            (Token.Punctuation, u';'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(expected, list(self.lexer.get_tokens(fragment)))
-
-    def testLiteralNumberBool(self):
-        fragment = u'@NO;\n'
-        expected = [
-            (Token.Literal.Number, u'@NO'),
-            (Token.Punctuation, u';'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(expected, list(self.lexer.get_tokens(fragment)))
-
-    def testLiteralNumberBoolExpression(self):
-        fragment = u'@(YES);\n'
-        expected = [
-            (Token.Literal, u'@('),
-            (Token.Name.Builtin, u'YES'),
-            (Token.Literal, u')'),
-            (Token.Punctuation, u';'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(expected, list(self.lexer.get_tokens(fragment)))
-
-    def testModuleImport(self):
-        fragment = u'@import ModuleA;\n'
-        expected = [
-            (Token.Keyword, u'@import'),
-            (Token.Text, u' '),
-            (Token.Name, u'ModuleA'),
-            (Token.Punctuation, u';'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(expected, list(self.lexer.get_tokens(fragment)))
+@pytest.fixture(scope='module')
+def lexer():
+    yield ObjectiveCLexer()
+
+
+def test_literal_number_int(lexer):
+    fragment = u'@(1);\n'
+    expected = [
+        (Token.Literal, u'@('),
+        (Token.Literal.Number.Integer, u'1'),
+        (Token.Literal, u')'),
+        (Token.Punctuation, u';'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == expected
+
+
+def test_literal_number_expression(lexer):
+    fragment = u'@(1+2);\n'
+    expected = [
+        (Token.Literal, u'@('),
+        (Token.Literal.Number.Integer, u'1'),
+        (Token.Operator, u'+'),
+        (Token.Literal.Number.Integer, u'2'),
+        (Token.Literal, u')'),
+        (Token.Punctuation, u';'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == expected
+
+
+def test_literal_number_nested_expression(lexer):
+    fragment = u'@(1+(2+3));\n'
+    expected = [
+        (Token.Literal, u'@('),
+        (Token.Literal.Number.Integer, u'1'),
+        (Token.Operator, u'+'),
+        (Token.Punctuation, u'('),
+        (Token.Literal.Number.Integer, u'2'),
+        (Token.Operator, u'+'),
+        (Token.Literal.Number.Integer, u'3'),
+        (Token.Punctuation, u')'),
+        (Token.Literal, u')'),
+        (Token.Punctuation, u';'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == expected
+
+
+def test_literal_number_bool(lexer):
+    fragment = u'@NO;\n'
+    expected = [
+        (Token.Literal.Number, u'@NO'),
+        (Token.Punctuation, u';'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == expected
+
+
+def test_literal_number_bool_expression(lexer):
+    fragment = u'@(YES);\n'
+    expected = [
+        (Token.Literal, u'@('),
+        (Token.Name.Builtin, u'YES'),
+        (Token.Literal, u')'),
+        (Token.Punctuation, u';'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == expected
+
+
+def test_module_import(lexer):
+    fragment = u'@import ModuleA;\n'
+    expected = [
+        (Token.Keyword, u'@import'),
+        (Token.Text, u' '),
+        (Token.Name, u'ModuleA'),
+        (Token.Punctuation, u';'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == expected
diff --git a/tests/test_perllexer.py b/tests/test_perllexer.py
index 30f9eca..8849bac 100644
--- a/tests/test_perllexer.py
+++ b/tests/test_perllexer.py
@@ -8,150 +8,175 @@
 """
 
 import time
-import unittest
+
+import pytest
 
 from pygments.token import Keyword, Name, String, Text
 from pygments.lexers.perl import PerlLexer
 
 
-class RunawayRegexTest(unittest.TestCase):
-    # A previous version of the Perl lexer would spend a great deal of
-    # time backtracking when given particular strings.  These tests show that
-    # the runaway backtracking doesn't happen any more (at least for the given
-    # cases).
+@pytest.fixture(scope='module')
+def lexer():
+    yield PerlLexer()
+
+
+# Test runaway regexes.
+# A previous version of the Perl lexer would spend a great deal of
+# time backtracking when given particular strings.  These tests show that
+# the runaway backtracking doesn't happen any more (at least for the given
+# cases).
+
+
+# Test helpers.
+
+def assert_single_token(lexer, s, token):
+    """Show that a given string generates only one token."""
+    tokens = list(lexer.get_tokens_unprocessed(s))
+    assert len(tokens) == 1
+    assert s == tokens[0][2]
+    assert token == tokens[0][1]
+
+
+def assert_tokens(lexer, strings, expected_tokens):
+    """Show that a given string generates the expected tokens."""
+    tokens = list(lexer.get_tokens_unprocessed(''.join(strings)))
+    assert len(tokens) == len(expected_tokens)
+    for index, s in enumerate(strings):
+        assert s == tokens[index][2]
+        assert expected_tokens[index] == tokens[index][1]
+
+
+def assert_fast_tokenization(lexer, s):
+    """Show that a given string is tokenized quickly."""
+    start = time.time()
+    tokens = list(lexer.get_tokens_unprocessed(s))
+    end = time.time()
+    # Isn't 10 seconds kind of a long time?  Yes, but we don't want false
+    # positives when the tests are starved for CPU time.
+    if end-start > 10:
+        pytest.fail('tokenization took too long')
+    return tokens
+
+
+# Strings.
+
+def test_single_quote_strings(lexer):
+    assert_single_token(lexer, r"'foo\tbar\\\'baz'", String)
+    assert_fast_tokenization(lexer, "'" + '\\'*999)
+
+
+def test_double_quote_strings(lexer):
+    assert_single_token(lexer, r'"foo\tbar\\\"baz"', String)
+    assert_fast_tokenization(lexer, '"' + '\\'*999)
+
+
+def test_backtick_strings(lexer):
+    assert_single_token(lexer, r'`foo\tbar\\\`baz`', String.Backtick)
+    assert_fast_tokenization(lexer, '`' + '\\'*999)
+
+
+# Regex matches with various delimiters.
+
+def test_match(lexer):
+    assert_single_token(lexer, r'/aa\tbb/', String.Regex)
+    assert_fast_tokenization(lexer, '/' + '\\'*999)
+
+
+def test_match_with_slash(lexer):
+    assert_tokens(lexer, ['m', '/\n\\t\\\\/'], [String.Regex, String.Regex])
+    assert_fast_tokenization(lexer, 'm/xxx\n' + '\\'*999)
+
+
+def test_match_with_bang(lexer):
+    assert_tokens(lexer, ['m', r'!aa\t\!bb!'], [String.Regex, String.Regex])
+    assert_fast_tokenization(lexer, 'm!' + '\\'*999)
+
+
+def test_match_with_brace(lexer):
+    assert_tokens(lexer, ['m', r'{aa\t\}bb}'], [String.Regex, String.Regex])
+    assert_fast_tokenization(lexer, 'm{' + '\\'*999)
+
+
+def test_match_with_angle_brackets(lexer):
+    assert_tokens(lexer, ['m', r'<aa\t\>bb>'], [String.Regex, String.Regex])
+    assert_fast_tokenization(lexer, 'm<' + '\\'*999)
+
+
+def test_match_with_parenthesis(lexer):
+    assert_tokens(lexer, ['m', r'(aa\t\)bb)'], [String.Regex, String.Regex])
+    assert_fast_tokenization(lexer, 'm(' + '\\'*999)
 
-    lexer = PerlLexer()
 
-    ### Test helpers.
+def test_match_with_at_sign(lexer):
+    assert_tokens(lexer, ['m', r'@aa\t\@bb@'], [String.Regex, String.Regex])
+    assert_fast_tokenization(lexer, 'm@' + '\\'*999)
 
-    def assert_single_token(self, s, token):
-        """Show that a given string generates only one token."""
-        tokens = list(self.lexer.get_tokens_unprocessed(s))
-        self.assertEqual(len(tokens), 1, tokens)
-        self.assertEqual(s, tokens[0][2])
-        self.assertEqual(token, tokens[0][1])
 
-    def assert_tokens(self, strings, expected_tokens):
-        """Show that a given string generates the expected tokens."""
-        tokens = list(self.lexer.get_tokens_unprocessed(''.join(strings)))
-        self.assertEqual(len(tokens), len(expected_tokens), tokens)
-        for index, s in enumerate(strings):
-            self.assertEqual(s, tokens[index][2])
-            self.assertEqual(expected_tokens[index], tokens[index][1])
+def test_match_with_percent_sign(lexer):
+    assert_tokens(lexer, ['m', r'%aa\t\%bb%'], [String.Regex, String.Regex])
+    assert_fast_tokenization(lexer, 'm%' + '\\'*999)
 
-    def assert_fast_tokenization(self, s):
-        """Show that a given string is tokenized quickly."""
-        start = time.time()
-        tokens = list(self.lexer.get_tokens_unprocessed(s))
-        end = time.time()
-        # Isn't 10 seconds kind of a long time?  Yes, but we don't want false
-        # positives when the tests are starved for CPU time.
-        if end-start > 10:
-            self.fail('tokenization took too long')
-        return tokens
 
-    ### Strings.
+def test_match_with_dollar_sign(lexer):
+    assert_tokens(lexer, ['m', r'$aa\t\$bb$'], [String.Regex, String.Regex])
+    assert_fast_tokenization(lexer, 'm$' + '\\'*999)
 
-    def test_single_quote_strings(self):
-        self.assert_single_token(r"'foo\tbar\\\'baz'", String)
-        self.assert_fast_tokenization("'" + '\\'*999)
 
-    def test_double_quote_strings(self):
-        self.assert_single_token(r'"foo\tbar\\\"baz"', String)
-        self.assert_fast_tokenization('"' + '\\'*999)
+# Regex substitutions with various delimiters.
 
-    def test_backtick_strings(self):
-        self.assert_single_token(r'`foo\tbar\\\`baz`', String.Backtick)
-        self.assert_fast_tokenization('`' + '\\'*999)
+def test_substitution_with_slash(lexer):
+    assert_single_token(lexer, 's/aaa/bbb/g', String.Regex)
+    assert_fast_tokenization(lexer, 's/foo/' + '\\'*999)
 
-    ### Regex matches with various delimiters.
 
-    def test_match(self):
-        self.assert_single_token(r'/aa\tbb/', String.Regex)
-        self.assert_fast_tokenization('/' + '\\'*999)
+def test_substitution_with_at_sign(lexer):
+    assert_single_token(lexer, r's@aaa@bbb@g', String.Regex)
+    assert_fast_tokenization(lexer, 's@foo@' + '\\'*999)
 
-    def test_match_with_slash(self):
-        self.assert_tokens(['m', '/\n\\t\\\\/'], [String.Regex, String.Regex])
-        self.assert_fast_tokenization('m/xxx\n' + '\\'*999)
 
-    def test_match_with_bang(self):
-        self.assert_tokens(['m', r'!aa\t\!bb!'], [String.Regex, String.Regex])
-        self.assert_fast_tokenization('m!' + '\\'*999)
+def test_substitution_with_percent_sign(lexer):
+    assert_single_token(lexer, r's%aaa%bbb%g', String.Regex)
+    assert_fast_tokenization(lexer, 's%foo%' + '\\'*999)
 
-    def test_match_with_brace(self):
-        self.assert_tokens(['m', r'{aa\t\}bb}'], [String.Regex, String.Regex])
-        self.assert_fast_tokenization('m{' + '\\'*999)
 
-    def test_match_with_angle_brackets(self):
-        self.assert_tokens(['m', r'<aa\t\>bb>'], [String.Regex, String.Regex])
-        self.assert_fast_tokenization('m<' + '\\'*999)
+def test_substitution_with_brace(lexer):
+    assert_single_token(lexer, r's{aaa}', String.Regex)
+    assert_fast_tokenization(lexer, 's{' + '\\'*999)
 
-    def test_match_with_parenthesis(self):
-        self.assert_tokens(['m', r'(aa\t\)bb)'], [String.Regex, String.Regex])
-        self.assert_fast_tokenization('m(' + '\\'*999)
 
-    def test_match_with_at_sign(self):
-        self.assert_tokens(['m', r'@aa\t\@bb@'], [String.Regex, String.Regex])
-        self.assert_fast_tokenization('m@' + '\\'*999)
+def test_substitution_with_angle_bracket(lexer):
+    assert_single_token(lexer, r's<aaa>', String.Regex)
+    assert_fast_tokenization(lexer, 's<' + '\\'*999)
 
-    def test_match_with_percent_sign(self):
-        self.assert_tokens(['m', r'%aa\t\%bb%'], [String.Regex, String.Regex])
-        self.assert_fast_tokenization('m%' + '\\'*999)
 
-    def test_match_with_dollar_sign(self):
-        self.assert_tokens(['m', r'$aa\t\$bb$'], [String.Regex, String.Regex])
-        self.assert_fast_tokenization('m$' + '\\'*999)
+def test_substitution_with_square_bracket(lexer):
+    assert_single_token(lexer, r's[aaa]', String.Regex)
+    assert_fast_tokenization(lexer, 's[' + '\\'*999)
 
-    ### Regex substitutions with various delimeters.
 
-    def test_substitution_with_slash(self):
-        self.assert_single_token('s/aaa/bbb/g', String.Regex)
-        self.assert_fast_tokenization('s/foo/' + '\\'*999)
+def test_substitution_with_parenthesis(lexer):
+    assert_single_token(lexer, r's(aaa)', String.Regex)
+    assert_fast_tokenization(lexer, 's(' + '\\'*999)
 
-    def test_substitution_with_at_sign(self):
-        self.assert_single_token(r's@aaa@bbb@g', String.Regex)
-        self.assert_fast_tokenization('s@foo@' + '\\'*999)
 
-    def test_substitution_with_percent_sign(self):
-        self.assert_single_token(r's%aaa%bbb%g', String.Regex)
-        self.assert_fast_tokenization('s%foo%' + '\\'*999)
-
-    def test_substitution_with_brace(self):
-        self.assert_single_token(r's{aaa}', String.Regex)
-        self.assert_fast_tokenization('s{' + '\\'*999)
+# Namespaces/modules
 
-    def test_substitution_with_angle_bracket(self):
-        self.assert_single_token(r's<aaa>', String.Regex)
-        self.assert_fast_tokenization('s<' + '\\'*999)
-
-    def test_substitution_with_angle_bracket(self):
-        self.assert_single_token(r's<aaa>', String.Regex)
-        self.assert_fast_tokenization('s<' + '\\'*999)
-
-    def test_substitution_with_square_bracket(self):
-        self.assert_single_token(r's[aaa]', String.Regex)
-        self.assert_fast_tokenization('s[' + '\\'*999)
-
-    def test_substitution_with_parenthesis(self):
-        self.assert_single_token(r's(aaa)', String.Regex)
-        self.assert_fast_tokenization('s(' + '\\'*999)
+def test_package_statement(lexer):
+    assert_tokens(lexer, ['package', ' ', 'Foo'], [Keyword, Text, Name.Namespace])
+    assert_tokens(lexer, ['package', '  ', 'Foo::Bar'], [Keyword, Text, Name.Namespace])
 
-    ### Namespaces/modules
 
-    def test_package_statement(self):
-        self.assert_tokens(['package', ' ', 'Foo'], [Keyword, Text, Name.Namespace])
-        self.assert_tokens(['package', '  ', 'Foo::Bar'], [Keyword, Text, Name.Namespace])
+def test_use_statement(lexer):
+    assert_tokens(lexer, ['use', ' ', 'Foo'], [Keyword, Text, Name.Namespace])
+    assert_tokens(lexer, ['use', '  ', 'Foo::Bar'], [Keyword, Text, Name.Namespace])
 
-    def test_use_statement(self):
-        self.assert_tokens(['use', ' ', 'Foo'], [Keyword, Text, Name.Namespace])
-        self.assert_tokens(['use', '  ', 'Foo::Bar'], [Keyword, Text, Name.Namespace])
 
-    def test_no_statement(self):
-        self.assert_tokens(['no', ' ', 'Foo'], [Keyword, Text, Name.Namespace])
-        self.assert_tokens(['no', '  ', 'Foo::Bar'], [Keyword, Text, Name.Namespace])
+def test_no_statement(lexer):
+    assert_tokens(lexer, ['no', ' ', 'Foo'], [Keyword, Text, Name.Namespace])
+    assert_tokens(lexer, ['no', '  ', 'Foo::Bar'], [Keyword, Text, Name.Namespace])
 
-    def test_require_statement(self):
-        self.assert_tokens(['require', ' ', 'Foo'], [Keyword, Text, Name.Namespace])
-        self.assert_tokens(['require', '  ', 'Foo::Bar'], [Keyword, Text, Name.Namespace])
-        self.assert_tokens(['require', ' ', '"Foo/Bar.pm"'], [Keyword, Text, String])
 
+def test_require_statement(lexer):
+    assert_tokens(lexer, ['require', ' ', 'Foo'], [Keyword, Text, Name.Namespace])
+    assert_tokens(lexer, ['require', '  ', 'Foo::Bar'], [Keyword, Text, Name.Namespace])
+    assert_tokens(lexer, ['require', ' ', '"Foo/Bar.pm"'], [Keyword, Text, String])
diff --git a/tests/test_php.py b/tests/test_php.py
index bb047b9..1660183 100644
--- a/tests/test_php.py
+++ b/tests/test_php.py
@@ -7,30 +7,31 @@
     :license: BSD, see LICENSE for details.
 """
 
-import unittest
+import pytest
 
 from pygments.lexers import PhpLexer
 from pygments.token import Token
 
 
-class PhpTest(unittest.TestCase):
-    def setUp(self):
-        self.lexer = PhpLexer()
+@pytest.fixture(scope='module')
+def lexer():
+    yield PhpLexer()
 
-    def testStringEscapingRun(self):
-        fragment = '\n'
-        tokens = [
-            (Token.Comment.Preproc, ''),
-            (Token.Other, '\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+
+def test_string_escaping_run(lexer):
+    fragment = '\n'
+    tokens = [
+        (Token.Comment.Preproc, ''),
+        (Token.Other, '\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
diff --git a/tests/test_praat.py b/tests/test_praat.py
index 9bf3ce7..61ddfd5 100644
--- a/tests/test_praat.py
+++ b/tests/test_praat.py
@@ -7,124 +7,199 @@
     :license: BSD, see LICENSE for details.
 """
 
-import unittest
+import pytest
 
 from pygments.token import Token
 from pygments.lexers import PraatLexer
 
-class PraatTest(unittest.TestCase):
-
-    def setUp(self):
-        self.lexer = PraatLexer()
-        self.maxDiff = None
-
-    def testNumericAssignment(self):
-        fragment = u'var = -15e4\n'
-        tokens = [
-            (Token.Text, u'var'),
-            (Token.Text, u' '),
-            (Token.Operator, u'='),
-            (Token.Text, u' '),
-            (Token.Operator, u'-'),
-            (Token.Literal.Number, u'15e4'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def testStringAssignment(self):
-        fragment = u'var$ = "foo"\n'
-        tokens = [
-            (Token.Text, u'var$'),
-            (Token.Text, u' '),
-            (Token.Operator, u'='),
-            (Token.Text, u' '),
-            (Token.Literal.String, u'"'),
-            (Token.Literal.String, u'foo'),
-            (Token.Literal.String, u'"'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def testStringEscapedQuotes(self):
-        fragment = u'"it said ""foo"""\n'
-        tokens = [
-            (Token.Literal.String, u'"'),
-            (Token.Literal.String, u'it said '),
-            (Token.Literal.String, u'"'),
-            (Token.Literal.String, u'"'),
-            (Token.Literal.String, u'foo'),
-            (Token.Literal.String, u'"'),
-            (Token.Literal.String, u'"'),
-            (Token.Literal.String, u'"'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def testFunctionCall(self):
-        fragment = u'selected("Sound", i+(a*b))\n'
-        tokens = [
-            (Token.Name.Function, u'selected'),
-            (Token.Punctuation, u'('),
-            (Token.Literal.String, u'"'),
-            (Token.Literal.String, u'Sound'),
-            (Token.Literal.String, u'"'),
-            (Token.Punctuation, u','),
-            (Token.Text, u' '),
-            (Token.Text, u'i'),
-            (Token.Operator, u'+'),
-            (Token.Text, u'('),
-            (Token.Text, u'a'),
-            (Token.Operator, u'*'),
-            (Token.Text, u'b'),
-            (Token.Text, u')'),
-            (Token.Punctuation, u')'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def testBrokenUnquotedString(self):
-        fragment = u'printline string\n... \'interpolated\' string\n'
-        tokens = [
-            (Token.Keyword, u'printline'),
-            (Token.Text, u' '),
-            (Token.Literal.String, u'string'),
-            (Token.Text, u'\n'),
-            (Token.Punctuation, u'...'),
-            (Token.Text, u' '),
-            (Token.Literal.String.Interpol, u"'"),
-            (Token.Literal.String.Interpol, u'interpolated'),
-            (Token.Literal.String.Interpol, u"'"),
-            (Token.Text, u' '),
-            (Token.Literal.String, u'string'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def testInlinIf(self):
-        fragment = u'var = if true == 1 then -1 else 0 fi'
-        tokens = [
-            (Token.Text, u'var'),
-            (Token.Text, u' '),
-            (Token.Operator, u'='),
-            (Token.Text, u' '),
-            (Token.Keyword, u'if'),
-            (Token.Text, u' '),
-            (Token.Text, u'true'),
-            (Token.Text, u' '),
-            (Token.Operator, u'=='),
-            (Token.Text, u' '),
-            (Token.Literal.Number, u'1'),
-            (Token.Text, u' '),
-            (Token.Keyword, u'then'),
-            (Token.Text, u' '),
-            (Token.Operator, u'-'),
-            (Token.Literal.Number, u'1'),
-            (Token.Text, u' '),
-            (Token.Keyword, u'else'),
-            (Token.Text, u' '),
-            (Token.Literal.Number, u'0'),
-            (Token.Text, u' '),
-            (Token.Keyword, u'fi'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+
+@pytest.fixture(scope='module')
+def lexer():
+    yield PraatLexer()
+
+
+def test_numeric_assignment(lexer):
+    fragment = u'var = -15e4\n'
+    tokens = [
+        (Token.Text, u'var'),
+        (Token.Text, u' '),
+        (Token.Operator, u'='),
+        (Token.Text, u' '),
+        (Token.Operator, u'-'),
+        (Token.Literal.Number, u'15e4'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_string_assignment(lexer):
+    fragment = u'var$ = "foo"\n'
+    tokens = [
+        (Token.Text, u'var$'),
+        (Token.Text, u' '),
+        (Token.Operator, u'='),
+        (Token.Text, u' '),
+        (Token.Literal.String, u'"'),
+        (Token.Literal.String, u'foo'),
+        (Token.Literal.String, u'"'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_string_escaped_quotes(lexer):
+    fragment = u'"it said ""foo"""\n'
+    tokens = [
+        (Token.Literal.String, u'"'),
+        (Token.Literal.String, u'it said '),
+        (Token.Literal.String, u'"'),
+        (Token.Literal.String, u'"'),
+        (Token.Literal.String, u'foo'),
+        (Token.Literal.String, u'"'),
+        (Token.Literal.String, u'"'),
+        (Token.Literal.String, u'"'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_function_call(lexer):
+    fragment = u'selected("Sound", i+(a*b))\n'
+    tokens = [
+        (Token.Name.Function, u'selected'),
+        (Token.Punctuation, u'('),
+        (Token.Literal.String, u'"'),
+        (Token.Literal.String, u'Sound'),
+        (Token.Literal.String, u'"'),
+        (Token.Punctuation, u','),
+        (Token.Text, u' '),
+        (Token.Text, u'i'),
+        (Token.Operator, u'+'),
+        (Token.Text, u'('),
+        (Token.Text, u'a'),
+        (Token.Operator, u'*'),
+        (Token.Text, u'b'),
+        (Token.Text, u')'),
+        (Token.Punctuation, u')'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_broken_unquoted_string(lexer):
+    fragment = u'printline string\n... \'interpolated\' string\n'
+    tokens = [
+        (Token.Keyword, u'printline'),
+        (Token.Text, u' '),
+        (Token.Literal.String, u'string'),
+        (Token.Text, u'\n'),
+        (Token.Punctuation, u'...'),
+        (Token.Text, u' '),
+        (Token.Literal.String.Interpol, u"'interpolated'"),
+        (Token.Text, u' '),
+        (Token.Literal.String, u'string'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_inline_if(lexer):
+    fragment = u'var = if true == 1 then -1 else 0 fi'
+    tokens = [
+        (Token.Text, u'var'),
+        (Token.Text, u' '),
+        (Token.Operator, u'='),
+        (Token.Text, u' '),
+        (Token.Keyword, u'if'),
+        (Token.Text, u' '),
+        (Token.Text, u'true'),
+        (Token.Text, u' '),
+        (Token.Operator, u'=='),
+        (Token.Text, u' '),
+        (Token.Literal.Number, u'1'),
+        (Token.Text, u' '),
+        (Token.Keyword, u'then'),
+        (Token.Text, u' '),
+        (Token.Operator, u'-'),
+        (Token.Literal.Number, u'1'),
+        (Token.Text, u' '),
+        (Token.Keyword, u'else'),
+        (Token.Text, u' '),
+        (Token.Literal.Number, u'0'),
+        (Token.Text, u' '),
+        (Token.Keyword, u'fi'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+def test_interpolation_boundary(lexer):
+    fragment = u'"\'" + "\'"'
+    tokens = [
+        (Token.Literal.String, u'"'),
+        (Token.Literal.String, u"'"),
+        (Token.Literal.String, u'"'),
+        (Token.Text, u' '),
+        (Token.Operator, u'+'),
+        (Token.Text, u' '),
+        (Token.Literal.String, u'"'),
+        (Token.Literal.String, u"'"),
+        (Token.Literal.String, u'"'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+def test_interpolated_numeric_indexed(lexer):
+    fragment = u"'a[3]'"
+    tokens = [
+        (Token.Literal.String.Interpol, u"'a[3]'"),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+def test_interpolated_numeric_hash(lexer):
+    fragment = u"'a[\"b\"]'"
+    tokens = [
+        (Token.Literal.String.Interpol, u"'a[\"b\"]'"),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+def test_interpolated_string_indexed(lexer):
+    fragment = u"'a$[3]'"
+    tokens = [
+        (Token.Literal.String.Interpol, u"'a$[3]'"),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+def test_interpolated_string_hash(lexer):
+    fragment = u"'a$[\"b\"]'"
+    tokens = [
+        (Token.Literal.String.Interpol, u"'a$[\"b\"]'"),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+def test_interpolated_numeric_with_precision(lexer):
+    fragment = u"'a:3'"
+    tokens = [
+        (Token.Literal.String.Interpol, u"'a:3'"),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+def test_interpolated_indexed_numeric_with_precision(lexer):
+    fragment = u"'a[3]:3'"
+    tokens = [
+        (Token.Literal.String.Interpol, u"'a[3]:3'"),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+def test_interpolated_local_numeric_with_precision(lexer):
+    fragment = u"'a.a:3'"
+    tokens = [
+        (Token.Literal.String.Interpol, u"'a.a:3'"),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
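
Visible in the hunks above: the updated Praat lexer now emits each
interpolated span ('interpolated', 'a[3]', 'a$["b"]', 'a:3' and so on) as a
single String.Interpol token, where the old lexer split the quotes and body
into three tokens. To eyeball that with Pygments 2.5 installed:

    from pygments.lexers import PraatLexer

    for tokentype, value in PraatLexer().get_tokens("'a[3]:3'"):
        print(tokentype, repr(value))
    # Per the test above:
    # Token.Literal.String.Interpol "'a[3]:3'"
    # Token.Text '\n'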
diff --git a/tests/test_properties.py b/tests/test_properties.py
index aaa8ce2..25368d9 100644
--- a/tests/test_properties.py
+++ b/tests/test_properties.py
@@ -7,83 +7,90 @@
     :license: BSD, see LICENSE for details.
 """
 
-import unittest
+import pytest
 
 from pygments.lexers.configs import PropertiesLexer
 from pygments.token import Token
 
 
-class PropertiesTest(unittest.TestCase):
-    def setUp(self):
-        self.lexer = PropertiesLexer()
-
-    def test_comments(self):
-        """
-        Assures lines lead by either # or ! are recognized as a comment
-        """
-        fragment = '! a comment\n# also a comment\n'
-        tokens = [
-            (Token.Comment, '! a comment'),
-            (Token.Text, '\n'),
-            (Token.Comment, '# also a comment'),
-            (Token.Text, '\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def test_leading_whitespace_comments(self):
-        fragment = '    # comment\n'
-        tokens = [
-            (Token.Text, '    '),
-            (Token.Comment, '# comment'),
-            (Token.Text, '\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def test_escaped_space_in_key(self):
-        fragment = 'key = value\n'
-        tokens = [
-            (Token.Name.Attribute, 'key'),
-            (Token.Text, ' '),
-            (Token.Operator, '='),
-            (Token.Text, ' '),
-            (Token.Literal.String, 'value'),
-            (Token.Text, '\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def test_escaped_space_in_value(self):
-        fragment = 'key = doubleword\\ value\n'
-        tokens = [
-            (Token.Name.Attribute, 'key'),
-            (Token.Text, ' '),
-            (Token.Operator, '='),
-            (Token.Text, ' '),
-            (Token.Literal.String, 'doubleword\\ value'),
-            (Token.Text, '\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def test_space_delimited_kv_pair(self):
-        fragment = 'key value\n'
-        tokens = [
-            (Token.Name.Attribute, 'key'),
-            (Token.Text, ' '),
-            (Token.Literal.String, 'value\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def test_just_key(self):
-        fragment = 'justkey\n'
-        tokens = [
-            (Token.Name.Attribute, 'justkey'),
-            (Token.Text, '\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def test_just_key_with_space(self):
-        fragment = 'just\\ key\n'
-        tokens = [
-            (Token.Name.Attribute, 'just\\ key'),
-            (Token.Text, '\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+@pytest.fixture(scope='module')
+def lexer():
+    yield PropertiesLexer()
+
+
+def test_comments(lexer):
+    """
+    Assures lines led by either # or ! are recognized as comments.
+    """
+    fragment = '! a comment\n# also a comment\n'
+    tokens = [
+        (Token.Comment, '! a comment'),
+        (Token.Text, '\n'),
+        (Token.Comment, '# also a comment'),
+        (Token.Text, '\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_leading_whitespace_comments(lexer):
+    fragment = '    # comment\n'
+    tokens = [
+        (Token.Text, '    '),
+        (Token.Comment, '# comment'),
+        (Token.Text, '\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_escaped_space_in_key(lexer):
+    fragment = 'key = value\n'
+    tokens = [
+        (Token.Name.Attribute, 'key'),
+        (Token.Text, ' '),
+        (Token.Operator, '='),
+        (Token.Text, ' '),
+        (Token.Literal.String, 'value'),
+        (Token.Text, '\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_escaped_space_in_value(lexer):
+    fragment = 'key = doubleword\\ value\n'
+    tokens = [
+        (Token.Name.Attribute, 'key'),
+        (Token.Text, ' '),
+        (Token.Operator, '='),
+        (Token.Text, ' '),
+        (Token.Literal.String, 'doubleword\\ value'),
+        (Token.Text, '\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_space_delimited_kv_pair(lexer):
+    fragment = 'key value\n'
+    tokens = [
+        (Token.Name.Attribute, 'key'),
+        (Token.Text, ' '),
+        (Token.Literal.String, 'value\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_just_key(lexer):
+    fragment = 'justkey\n'
+    tokens = [
+        (Token.Name.Attribute, 'justkey'),
+        (Token.Text, '\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_just_key_with_space(lexer):
+    fragment = 'just\\ key\n'
+    tokens = [
+        (Token.Name.Attribute, 'just\\ key'),
+        (Token.Text, '\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
diff --git a/tests/test_python.py b/tests/test_python.py
index b9c6c49..4e5d5bb 100644
--- a/tests/test_python.py
+++ b/tests/test_python.py
@@ -7,127 +7,129 @@
     :license: BSD, see LICENSE for details.
 """
 
-import unittest
+import pytest
 
 from pygments.lexers import PythonLexer, Python3Lexer
 from pygments.token import Token
 
 
-class PythonTest(unittest.TestCase):
-    def setUp(self):
-        self.lexer = PythonLexer()
+@pytest.fixture(scope='module')
+def lexer2():
+    yield PythonLexer()
 
-    def test_cls_builtin(self):
-        """
-        Tests that a cls token gets interpreted as a Token.Name.Builtin.Pseudo
 
-        """
-        fragment = 'class TestClass():\n    @classmethod\n    def hello(cls):\n        pass\n'
-        tokens = [
-            (Token.Keyword, 'class'),
-            (Token.Text, ' '),
-            (Token.Name.Class, 'TestClass'),
-            (Token.Punctuation, '('),
-            (Token.Punctuation, ')'),
-            (Token.Punctuation, ':'),
-            (Token.Text, '\n'),
-            (Token.Text, '    '),
-            (Token.Name.Decorator, '@classmethod'),
-            (Token.Text, '\n'),
-            (Token.Text, '    '),
-            (Token.Keyword, 'def'),
-            (Token.Text, ' '),
-            (Token.Name.Function, 'hello'),
-            (Token.Punctuation, '('),
-            (Token.Name.Builtin.Pseudo, 'cls'),
-            (Token.Punctuation, ')'),
-            (Token.Punctuation, ':'),
-            (Token.Text, '\n'),
-            (Token.Text, '        '),
-            (Token.Keyword, 'pass'),
-            (Token.Text, '\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+@pytest.fixture(scope='module')
+def lexer3():
+    yield Python3Lexer()
+
+
+def test_cls_builtin(lexer2):
+    """
+    Tests that a cls token gets interpreted as a Token.Name.Builtin.Pseudo
+    """
+    fragment = 'class TestClass():\n    @classmethod\n    def hello(cls):\n        pass\n'
+    tokens = [
+        (Token.Keyword, 'class'),
+        (Token.Text, ' '),
+        (Token.Name.Class, 'TestClass'),
+        (Token.Punctuation, '('),
+        (Token.Punctuation, ')'),
+        (Token.Punctuation, ':'),
+        (Token.Text, '\n'),
+        (Token.Text, '    '),
+        (Token.Name.Decorator, '@classmethod'),
+        (Token.Text, '\n'),
+        (Token.Text, '    '),
+        (Token.Keyword, 'def'),
+        (Token.Text, ' '),
+        (Token.Name.Function, 'hello'),
+        (Token.Punctuation, '('),
+        (Token.Name.Builtin.Pseudo, 'cls'),
+        (Token.Punctuation, ')'),
+        (Token.Punctuation, ':'),
+        (Token.Text, '\n'),
+        (Token.Text, '        '),
+        (Token.Keyword, 'pass'),
+        (Token.Text, '\n'),
+    ]
+    assert list(lexer2.get_tokens(fragment)) == tokens
+
 
+def test_needs_name(lexer3):
+    """
+    Tests that '@' is recognized as an Operator
+    """
+    fragment = u'S = (H @ beta - r).T @ inv(H @ V @ H.T) @ (H @ beta - r)\n'
+    tokens = [
+        (Token.Name, u'S'),
+        (Token.Text, u' '),
+        (Token.Operator, u'='),
+        (Token.Text, u' '),
+        (Token.Punctuation, u'('),
+        (Token.Name, u'H'),
+        (Token.Text, u' '),
+        (Token.Operator, u'@'),
+        (Token.Text, u' '),
+        (Token.Name, u'beta'),
+        (Token.Text, u' '),
+        (Token.Operator, u'-'),
+        (Token.Text, u' '),
+        (Token.Name, u'r'),
+        (Token.Punctuation, u')'),
+        (Token.Operator, u'.'),
+        (Token.Name, u'T'),
+        (Token.Text, u' '),
+        (Token.Operator, u'@'),
+        (Token.Text, u' '),
+        (Token.Name, u'inv'),
+        (Token.Punctuation, u'('),
+        (Token.Name, u'H'),
+        (Token.Text, u' '),
+        (Token.Operator, u'@'),
+        (Token.Text, u' '),
+        (Token.Name, u'V'),
+        (Token.Text, u' '),
+        (Token.Operator, u'@'),
+        (Token.Text, u' '),
+        (Token.Name, u'H'),
+        (Token.Operator, u'.'),
+        (Token.Name, u'T'),
+        (Token.Punctuation, u')'),
+        (Token.Text, u' '),
+        (Token.Operator, u'@'),
+        (Token.Text, u' '),
+        (Token.Punctuation, u'('),
+        (Token.Name, u'H'),
+        (Token.Text, u' '),
+        (Token.Operator, u'@'),
+        (Token.Text, u' '),
+        (Token.Name, u'beta'),
+        (Token.Text, u' '),
+        (Token.Operator, u'-'),
+        (Token.Text, u' '),
+        (Token.Name, u'r'),
+        (Token.Punctuation, u')'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer3.get_tokens(fragment)) == tokens
 
-class Python3Test(unittest.TestCase):
-    def setUp(self):
-        self.lexer = Python3Lexer()
-        
-    def testNeedsName(self):
-        """
-        Tests that '@' is recognized as an Operator
-        """
-        fragment = u'S = (H @ beta - r).T @ inv(H @ V @ H.T) @ (H @ beta - r)\n'
+
+def test_pep_515(lexer3):
+    """
+    Tests that the lexer can parse numeric literals with underscores
+    """
+    fragments = (
+        (Token.Literal.Number.Integer, u'1_000_000'),
+        (Token.Literal.Number.Float, u'1_000.000_001'),
+        (Token.Literal.Number.Float, u'1_000e1_000j'),
+        (Token.Literal.Number.Hex, u'0xCAFE_F00D'),
+        (Token.Literal.Number.Bin, u'0b_0011_1111_0100_1110'),
+        (Token.Literal.Number.Oct, u'0o_777_123'),
+    )
+
+    for token, fragment in fragments:
         tokens = [
-            (Token.Name, u'S'),
-            (Token.Text, u' '),
-            (Token.Operator, u'='),
-            (Token.Text, u' '),
-            (Token.Punctuation, u'('),
-            (Token.Name, u'H'),
-            (Token.Text, u' '),
-            (Token.Operator, u'@'),
-            (Token.Text, u' '),
-            (Token.Name, u'beta'),
-            (Token.Text, u' '),
-            (Token.Operator, u'-'),
-            (Token.Text, u' '),
-            (Token.Name, u'r'),
-            (Token.Punctuation, u')'),
-            (Token.Operator, u'.'),
-            (Token.Name, u'T'),
-            (Token.Text, u' '),
-            (Token.Operator, u'@'),
-            (Token.Text, u' '),
-            (Token.Name, u'inv'),
-            (Token.Punctuation, u'('),
-            (Token.Name, u'H'),
-            (Token.Text, u' '),
-            (Token.Operator, u'@'),
-            (Token.Text, u' '),
-            (Token.Name, u'V'),
-            (Token.Text, u' '),
-            (Token.Operator, u'@'),
-            (Token.Text, u' '),
-            (Token.Name, u'H'),
-            (Token.Operator, u'.'),
-            (Token.Name, u'T'),
-            (Token.Punctuation, u')'),
-            (Token.Text, u' '),
-            (Token.Operator, u'@'),
-            (Token.Text, u' '),
-            (Token.Punctuation, u'('),
-            (Token.Name, u'H'),
-            (Token.Text, u' '),
-            (Token.Operator, u'@'),
-            (Token.Text, u' '),
-            (Token.Name, u'beta'),
-            (Token.Text, u' '),
-            (Token.Operator, u'-'),
-            (Token.Text, u' '),
-            (Token.Name, u'r'),
-            (Token.Punctuation, u')'),
+            (token, fragment),
             (Token.Text, u'\n'),
         ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def test_pep_515(self):
-        """
-        Tests that the lexer can parse numeric literals with underscores
-        """
-        fragments = (
-            (Token.Literal.Number.Integer, u'1_000_000'),
-            (Token.Literal.Number.Float, u'1_000.000_001'),
-            (Token.Literal.Number.Float, u'1_000e1_000j'),
-            (Token.Literal.Number.Hex, u'0xCAFE_F00D'),
-            (Token.Literal.Number.Bin, u'0b_0011_1111_0100_1110'),
-            (Token.Literal.Number.Oct, u'0o_777_123'),
-        )
-
-        for token, fragment in fragments:
-            tokens = [
-                (token, fragment),
-                (Token.Text, u'\n'),
-            ]
-            self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+        assert list(lexer3.get_tokens(fragment)) == tokens
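
The PEP 515 cases can be tried outside the test harness as well; a quick
standalone check that mirrors one expectation from test_pep_515 above:

    from pygments.lexers import Python3Lexer
    from pygments.token import Token

    lexer = Python3Lexer()
    # Underscore-grouped integer literal, as asserted above.
    assert list(lexer.get_tokens('1_000_000\n')) == [
        (Token.Literal.Number.Integer, '1_000_000'),
        (Token.Text, '\n'),
    ]
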
diff --git a/tests/test_qbasiclexer.py b/tests/test_qbasiclexer.py
index f40b8b6..3c64d69 100644
--- a/tests/test_qbasiclexer.py
+++ b/tests/test_qbasiclexer.py
@@ -7,37 +7,35 @@
     :license: BSD, see LICENSE for details.
 """
 
-import glob
-import os
-import unittest
+import pytest
 
 from pygments.token import Token
 from pygments.lexers.basic import QBasicLexer
 
 
-class QBasicTest(unittest.TestCase):
-    def setUp(self):
-        self.lexer = QBasicLexer()
-        self.maxDiff = None
+@pytest.fixture(scope='module')
+def lexer():
+    yield QBasicLexer()
 
-    def testKeywordsWithDollar(self):
-        fragment = u'DIM x\nx = RIGHT$("abc", 1)\n'
-        expected = [
-            (Token.Keyword.Declaration, u'DIM'),
-            (Token.Text.Whitespace, u' '),
-            (Token.Name.Variable.Global, u'x'),
-            (Token.Text, u'\n'),
-            (Token.Name.Variable.Global, u'x'),
-            (Token.Text.Whitespace, u' '),
-            (Token.Operator, u'='),
-            (Token.Text.Whitespace, u' '),
-            (Token.Keyword.Reserved, u'RIGHT$'),
-            (Token.Punctuation, u'('),
-            (Token.Literal.String.Double, u'"abc"'),
-            (Token.Punctuation, u','),
-            (Token.Text.Whitespace, u' '),
-            (Token.Literal.Number.Integer.Long, u'1'),
-            (Token.Punctuation, u')'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(expected, list(self.lexer.get_tokens(fragment)))
+
+def test_keywords_with_dollar(lexer):
+    fragment = u'DIM x\nx = RIGHT$("abc", 1)\n'
+    expected = [
+        (Token.Keyword.Declaration, u'DIM'),
+        (Token.Text.Whitespace, u' '),
+        (Token.Name.Variable.Global, u'x'),
+        (Token.Text, u'\n'),
+        (Token.Name.Variable.Global, u'x'),
+        (Token.Text.Whitespace, u' '),
+        (Token.Operator, u'='),
+        (Token.Text.Whitespace, u' '),
+        (Token.Keyword.Reserved, u'RIGHT$'),
+        (Token.Punctuation, u'('),
+        (Token.Literal.String.Double, u'"abc"'),
+        (Token.Punctuation, u','),
+        (Token.Text.Whitespace, u' '),
+        (Token.Literal.Number.Integer.Long, u'1'),
+        (Token.Punctuation, u')'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == expected
diff --git a/tests/test_r.py b/tests/test_r.py
index 70148e5..72cb8af 100644
--- a/tests/test_r.py
+++ b/tests/test_r.py
@@ -1,70 +1,75 @@
 # -*- coding: utf-8 -*-
 """
     R Tests
-    ~~~~~~~~~
+    ~~~~~~~
 
-    :copyright: Copyright 2006-2016 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
-import unittest
+import pytest
 
 from pygments.lexers import SLexer
 from pygments.token import Token, Name, Punctuation
 
 
-class RTest(unittest.TestCase):
-    def setUp(self):
-        self.lexer = SLexer()
+@pytest.fixture(scope='module')
+def lexer():
+    yield SLexer()
 
-    def testCall(self):
-        fragment = u'f(1, a)\n'
-        tokens = [
-            (Name.Function, u'f'),
-            (Punctuation, u'('),
-            (Token.Literal.Number, u'1'),
-            (Punctuation, u','),
-            (Token.Text, u' '),
-            (Token.Name, u'a'),
-            (Punctuation, u')'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
 
-    def testName1(self):
-        fragment = u'._a_2.c'
-        tokens = [
-            (Name, u'._a_2.c'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+def test_call(lexer):
+    fragment = u'f(1, a)\n'
+    tokens = [
+        (Name.Function, u'f'),
+        (Punctuation, u'('),
+        (Token.Literal.Number, u'1'),
+        (Punctuation, u','),
+        (Token.Text, u' '),
+        (Token.Name, u'a'),
+        (Punctuation, u')'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
 
-    def testName2(self):
-        # Invalid names are valid if backticks are used
-        fragment = u'`.1 blah`'
-        tokens = [
-            (Name, u'`.1 blah`'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
 
-    def testName3(self):
-        # Internal backticks can be escaped
-        fragment = u'`.1 \\` blah`'
-        tokens = [
-            (Name, u'`.1 \\` blah`'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+def test_name1(lexer):
+    fragment = u'._a_2.c'
+    tokens = [
+        (Name, u'._a_2.c'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
 
-    def testCustomOperator(self):
-        fragment = u'7 % and % 8'
-        tokens = [
-            (Token.Literal.Number, u'7'),
-            (Token.Text, u' '),
-            (Token.Operator, u'% and %'),
-            (Token.Text, u' '),
-            (Token.Literal.Number, u'8'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+
+def test_name2(lexer):
+    # Invalid names are valid if backticks are used
+    fragment = u'`.1 blah`'
+    tokens = [
+        (Name, u'`.1 blah`'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_name3(lexer):
+    # Internal backticks can be escaped
+    fragment = u'`.1 \\` blah`'
+    tokens = [
+        (Name, u'`.1 \\` blah`'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_custom_operator(lexer):
+    fragment = u'7 % and % 8'
+    tokens = [
+        (Token.Literal.Number, u'7'),
+        (Token.Text, u' '),
+        (Token.Operator, u'% and %'),
+        (Token.Text, u' '),
+        (Token.Literal.Number, u'8'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
diff --git a/tests/test_regexlexer.py b/tests/test_regexlexer.py
index adc05a9..4e83236 100644
--- a/tests/test_regexlexer.py
+++ b/tests/test_regexlexer.py
@@ -7,14 +7,18 @@
     :license: BSD, see LICENSE for details.
 """
 
-import unittest
+import pytest
 
 from pygments.token import Text
-from pygments.lexer import RegexLexer
-from pygments.lexer import default
+from pygments.lexer import RegexLexer, default
 
 
-class TestLexer(RegexLexer):
+@pytest.fixture(scope='module')
+def lexer():
+    yield MyLexer()
+
+
+class MyLexer(RegexLexer):
     """Test tuple state transitions including #pop."""
     tokens = {
         'root': [
@@ -34,33 +38,29 @@ class TestLexer(RegexLexer):
     }
 
 
-class TupleTransTest(unittest.TestCase):
-    def test(self):
-        lx = TestLexer()
-        toks = list(lx.get_tokens_unprocessed('abcde'))
-        self.assertEqual(toks, [
-            (0, Text.Root, 'a'), (1, Text.Rag, 'b'), (2, Text.Rag, 'c'),
-            (3, Text.Beer, 'd'), (4, Text.Root, 'e')])
-
-    def test_multiline(self):
-        lx = TestLexer()
-        toks = list(lx.get_tokens_unprocessed('a\ne'))
-        self.assertEqual(toks, [
-            (0, Text.Root, 'a'), (1, Text, u'\n'), (2, Text.Root, 'e')])
-
-    def test_default(self):
-        lx = TestLexer()
-        toks = list(lx.get_tokens_unprocessed('d'))
-        self.assertEqual(toks, [(0, Text.Beer, 'd')])
-
-
-class PopEmptyTest(unittest.TestCase):
-    def test_regular(self):
-        lx = TestLexer()
-        toks = list(lx.get_tokens_unprocessed('#e'))
-        self.assertEqual(toks, [(0, Text.Root, '#'), (1, Text.Root, 'e')])
-
-    def test_tuple(self):
-        lx = TestLexer()
-        toks = list(lx.get_tokens_unprocessed('@e'))
-        self.assertEqual(toks, [(0, Text.Root, '@'), (1, Text.Root, 'e')])
+def test_tuple(lexer):
+    toks = list(lexer.get_tokens_unprocessed('abcde'))
+    assert toks == [
+        (0, Text.Root, 'a'), (1, Text.Rag, 'b'), (2, Text.Rag, 'c'),
+        (3, Text.Beer, 'd'), (4, Text.Root, 'e')]
+
+
+def test_multiline(lexer):
+    toks = list(lexer.get_tokens_unprocessed('a\ne'))
+    assert toks == [
+        (0, Text.Root, 'a'), (1, Text, u'\n'), (2, Text.Root, 'e')]
+
+
+def test_default(lexer):
+    toks = list(lexer.get_tokens_unprocessed('d'))
+    assert toks == [(0, Text.Beer, 'd')]
+
+
+def test_pop_empty_regular(lexer):
+    toks = list(lexer.get_tokens_unprocessed('#e'))
+    assert toks == [(0, Text.Root, '#'), (1, Text.Root, 'e')]
+
+
+def test_pop_empty_tuple(lexer):
+    toks = list(lexer.get_tokens_unprocessed('@e'))
+    assert toks == [(0, Text.Root, '@'), (1, Text.Root, 'e')]
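
The full tokens table of MyLexer is elided by the hunk above. As a hedged,
self-contained illustration of the tuple state transitions these tests
exercise (state names and input here are invented for the demo):

    from pygments.lexer import RegexLexer
    from pygments.token import Text


    class TupleDemoLexer(RegexLexer):
        tokens = {
            'root': [
                # A tuple pushes several states at once; the last entry
                # ends up on top of the stack.
                ('a', Text.Root, ('rag', 'beer')),
                ('e', Text.Root),
            ],
            'beer': [
                ('d', Text.Beer, '#pop'),
            ],
            'rag': [
                ('b', Text.Rag, '#pop'),
            ],
        }


    toks = list(TupleDemoLexer().get_tokens_unprocessed('adbe'))
    assert toks == [(0, Text.Root, 'a'), (1, Text.Beer, 'd'),
                    (2, Text.Rag, 'b'), (3, Text.Root, 'e')]
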
diff --git a/tests/test_regexopt.py b/tests/test_regexopt.py
index 9c44f49..20d48dd 100644
--- a/tests/test_regexopt.py
+++ b/tests/test_regexopt.py
@@ -9,102 +9,95 @@
 
 import re
 import random
-import unittest
-import itertools
+from itertools import combinations_with_replacement
 
 from pygments.regexopt import regex_opt
 
 ALPHABET = ['a', 'b', 'c', 'd', 'e']
 
-try:
-    from itertools import combinations_with_replacement
-    N_TRIES = 15
-except ImportError:
-    # Python 2.6
-    def combinations_with_replacement(iterable, r):
-        pool = tuple(iterable)
-        n = len(pool)
-        for indices in itertools.product(range(n), repeat=r):
-            if sorted(indices) == list(indices):
-                yield tuple(pool[i] for i in indices)
-    N_TRIES = 9
-
-
-class RegexOptTestCase(unittest.TestCase):
-
-    def generate_keywordlist(self, length):
-        return [''.join(p) for p in
-                combinations_with_replacement(ALPHABET, length)]
-
-    def test_randomly(self):
-        # generate a list of all possible keywords of a certain length using
-        # a restricted alphabet, then choose some to match and make sure only
-        # those do
-        for n in range(3, N_TRIES):
-            kwlist = self.generate_keywordlist(n)
-            to_match = random.sample(kwlist,
-                                     random.randint(1, len(kwlist) - 1))
-            no_match = set(kwlist) - set(to_match)
-            rex = re.compile(regex_opt(to_match))
-            self.assertEqual(rex.groups, 1)
-            for w in to_match:
-                self.assertTrue(rex.match(w))
-            for w in no_match:
-                self.assertFalse(rex.match(w))
-
-    def test_prefix(self):
-        opt = regex_opt(('a', 'b'), prefix=r':{1,2}')
-        print(opt)
-        rex = re.compile(opt)
-        self.assertFalse(rex.match('a'))
-        self.assertTrue(rex.match('::a'))
-        self.assertFalse(rex.match(':::')) # fullmatch
-
-    def test_suffix(self):
-        opt = regex_opt(('a', 'b'), suffix=r':{1,2}')
-        print(opt)
-        rex = re.compile(opt)
-        self.assertFalse(rex.match('a'))
-        self.assertTrue(rex.match('a::'))
-        self.assertFalse(rex.match(':::')) # fullmatch
-
-    def test_suffix_opt(self):
-        # test that detected suffixes remain sorted.
-        opt = regex_opt(('afoo', 'abfoo'))
-        print(opt)
-        rex = re.compile(opt)
-        m = rex.match('abfoo')
-        self.assertEqual(5, m.end())
-
-    def test_different_length_grouping(self):
-        opt = regex_opt(('a', 'xyz'))
-        print(opt)
-        rex = re.compile(opt)
-        self.assertTrue(rex.match('a'))
-        self.assertTrue(rex.match('xyz'))
-        self.assertFalse(rex.match('b'))
-        self.assertEqual(1, rex.groups)
-
-    def test_same_length_grouping(self):
-        opt = regex_opt(('a', 'b'))
-        print(opt)
-        rex = re.compile(opt)
-        self.assertTrue(rex.match('a'))
-        self.assertTrue(rex.match('b'))
-        self.assertFalse(rex.match('x'))
-
-        self.assertEqual(1, rex.groups)
-        groups = rex.match('a').groups()
-        self.assertEqual(('a',), groups)
-
-    def test_same_length_suffix_grouping(self):
-        opt = regex_opt(('a', 'b'), suffix='(m)')
-        print(opt)
-        rex = re.compile(opt)
-        self.assertTrue(rex.match('am'))
-        self.assertTrue(rex.match('bm'))
-        self.assertFalse(rex.match('xm'))
-        self.assertFalse(rex.match('ax'))
-        self.assertEqual(2, rex.groups)
-        groups = rex.match('am').groups()
-        self.assertEqual(('a', 'm'), groups)
+N_TRIES = 15
+
+
+def generate_keywordlist(length):
+    return [''.join(p) for p in
+            combinations_with_replacement(ALPHABET, length)]
+
+
+def test_randomly():
+    # generate a list of all possible keywords of a certain length using
+    # a restricted alphabet, then choose some to match and make sure only
+    # those do
+    for n in range(3, N_TRIES):
+        kwlist = generate_keywordlist(n)
+        to_match = random.sample(kwlist,
+                                 random.randint(1, len(kwlist) - 1))
+        no_match = set(kwlist) - set(to_match)
+        rex = re.compile(regex_opt(to_match))
+        assert rex.groups == 1
+        for w in to_match:
+            assert rex.match(w)
+        for w in no_match:
+            assert not rex.match(w)
+
+
+def test_prefix():
+    opt = regex_opt(('a', 'b'), prefix=r':{1,2}')
+    print(opt)
+    rex = re.compile(opt)
+    assert not rex.match('a')
+    assert rex.match('::a')
+    assert not rex.match(':::')  # fullmatch
+
+
+def test_suffix():
+    opt = regex_opt(('a', 'b'), suffix=r':{1,2}')
+    print(opt)
+    rex = re.compile(opt)
+    assert not rex.match('a')
+    assert rex.match('a::')
+    assert not rex.match(':::')  # fullmatch
+
+
+def test_suffix_opt():
+    # test that detected suffixes remain sorted.
+    opt = regex_opt(('afoo', 'abfoo'))
+    print(opt)
+    rex = re.compile(opt)
+    m = rex.match('abfoo')
+    assert m.end() == 5
+
+
+def test_different_length_grouping():
+    opt = regex_opt(('a', 'xyz'))
+    print(opt)
+    rex = re.compile(opt)
+    assert rex.match('a')
+    assert rex.match('xyz')
+    assert not rex.match('b')
+    assert rex.groups == 1
+
+
+def test_same_length_grouping():
+    opt = regex_opt(('a', 'b'))
+    print(opt)
+    rex = re.compile(opt)
+    assert rex.match('a')
+    assert rex.match('b')
+    assert not rex.match('x')
+
+    assert rex.groups == 1
+    groups = rex.match('a').groups()
+    assert groups == ('a',)
+
+
+def test_same_length_suffix_grouping():
+    opt = regex_opt(('a', 'b'), suffix='(m)')
+    print(opt)
+    rex = re.compile(opt)
+    assert rex.match('am')
+    assert rex.match('bm')
+    assert not rex.match('xm')
+    assert not rex.match('ax')
+    assert rex.groups == 2
+    groups = rex.match('am').groups()
+    assert groups == ('a', 'm')
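
For reference, regex_opt() folds a list of literal words into a single
optimized alternation wrapped in one capturing group; a short behavioural
check with an arbitrary word list:

    import re

    from pygments.regexopt import regex_opt

    rex = re.compile(regex_opt(['if', 'elif', 'else']))
    assert rex.groups == 1     # one enclosing group, as the tests assert
    assert rex.match('elif')   # every listed word matches
    assert not rex.match('x')  # unrelated input does not
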
diff --git a/tests/test_rtf_formatter.py b/tests/test_rtf_formatter.py
index 80ce01f..35179df 100644
--- a/tests/test_rtf_formatter.py
+++ b/tests/test_rtf_formatter.py
@@ -1,109 +1,107 @@
 # -*- coding: utf-8 -*-
 """
     Pygments RTF formatter tests
-    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
     :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
-import unittest
-from string_asserts import StringTests
-
 from pygments.util import StringIO
 from pygments.formatters import RtfFormatter
 from pygments.lexers.special import TextLexer
 
-class RtfFormatterTest(StringTests, unittest.TestCase):
-    foot = (r'\par' '\n' r'}')
-
-    def _escape(self, string):
-        return(string.replace("\n", r"\n"))
-
-    def _build_message(self, *args, **kwargs):
-        string = kwargs.get('string', None)
-        t = self._escape(kwargs.get('t', ''))
-        expected = self._escape(kwargs.get('expected', ''))
-        result = self._escape(kwargs.get('result', ''))
-
-        if string is None:
-            string = (u"The expected output of '{t}'\n"
-                      u"\t\tShould be '{expected}'\n"
-                      u"\t\tActually outputs '{result}'\n"
-                      u"\t(WARNING: Partial Output of Result!)")
-
-        end = -(len(self._escape(self.foot)))
-        start = end-len(expected)
-
-        return string.format(t=t,
-                             result = result[start:end],
-                             expected = expected)
-
-    def format_rtf(self, t):
-        tokensource = list(TextLexer().get_tokens(t))
-        fmt = RtfFormatter()
-        buf = StringIO()
-        fmt.format(tokensource, buf)
-        result = buf.getvalue()
-        buf.close()
-        return result
-
-    def test_rtf_header(self):
-        t = u''
-        result = self.format_rtf(t)
-        expected = r'{\rtf1\ansi\uc0'
-        msg = (u"RTF documents are expected to start with '{expected}'\n"
-               u"\t\tStarts intead with '{result}'\n"
-               u"\t(WARNING: Partial Output of Result!)".format(
-                   expected = expected,
-                   result = result[:len(expected)]))
-        self.assertStartsWith(result, expected, msg)
-
-    def test_rtf_footer(self):
-        t = u''
-        result = self.format_rtf(t)
-        expected = self.foot
-        msg = (u"RTF documents are expected to end with '{expected}'\n"
-               u"\t\tEnds intead with '{result}'\n"
-               u"\t(WARNING: Partial Output of Result!)".format(
-                   expected = self._escape(expected),
-                   result = self._escape(result[-len(expected):])))
-        self.assertEndsWith(result, expected, msg)
-
-    def test_ascii_characters(self):
-        t = u'a b c d ~'
-        result = self.format_rtf(t)
-        expected = (r'a b c d ~')
-        if not result.endswith(self.foot):
-            return(unittest.skip('RTF Footer incorrect'))
-        msg = self._build_message(t=t, result=result, expected=expected)
-        self.assertEndsWith(result, expected+self.foot, msg)
-
-    def test_escape_characters(self):
-        t = u'\\ {{'
-        result = self.format_rtf(t)
-        expected = (r'\\ \{\{')
-        if not result.endswith(self.foot):
-            return(unittest.skip('RTF Footer incorrect'))
-        msg = self._build_message(t=t, result=result, expected=expected)
-        self.assertEndsWith(result, expected+self.foot, msg)
-
-    def test_single_characters(self):
-        t = u'â € ¤ каждой'
-        result = self.format_rtf(t)
-        expected = (r'{\u226} {\u8364} {\u164} '
-                    r'{\u1082}{\u1072}{\u1078}{\u1076}{\u1086}{\u1081}')
-        if not result.endswith(self.foot):
-            return(unittest.skip('RTF Footer incorrect'))
-        msg = self._build_message(t=t, result=result, expected=expected)
-        self.assertEndsWith(result, expected+self.foot, msg)
-
-    def test_double_characters(self):
-        t = u'က 힣 ↕ ↕︎ 鼖'
-        result = self.format_rtf(t)
-        expected = (r'{\u4096} {\u55203} {\u8597} '
-                    r'{\u8597}{\u65038} {\u55422}{\u56859}')
-        if not result.endswith(self.foot):
-            return(unittest.skip('RTF Footer incorrect'))
-        msg = self._build_message(t=t, result=result, expected=expected)
-        self.assertEndsWith(result, expected+self.foot, msg)
+
+foot = (r'\par' '\n' r'}')
+
+
+def _escape(string):
+    return string.replace("\n", r"\n")
+
+
+def _build_message(*args, **kwargs):
+    string = kwargs.get('string', None)
+    t = _escape(kwargs.get('t', ''))
+    expected = _escape(kwargs.get('expected', ''))
+    result = _escape(kwargs.get('result', ''))
+
+    if string is None:
+        string = (u"The expected output of '{t}'\n"
+                  u"\t\tShould be '{expected}'\n"
+                  u"\t\tActually outputs '{result}'\n"
+                  u"\t(WARNING: Partial Output of Result!)")
+
+    end = -len(_escape(foot))
+    start = end - len(expected)
+
+    return string.format(t=t,
+                         result=result[start:end],
+                         expected=expected)
+
+
+def format_rtf(t):
+    tokensource = list(TextLexer().get_tokens(t))
+    fmt = RtfFormatter()
+    buf = StringIO()
+    fmt.format(tokensource, buf)
+    result = buf.getvalue()
+    buf.close()
+    return result
+
+
+def test_rtf_header():
+    t = u''
+    result = format_rtf(t)
+    expected = r'{\rtf1\ansi\uc0'
+    msg = (u"RTF documents are expected to start with '{expected}'\n"
+           u"\t\tStarts instead with '{result}'\n"
+           u"\t(WARNING: Partial Output of Result!)".format(
+               expected=expected,
+               result=result[:len(expected)]))
+    assert result.startswith(expected), msg
+
+
+def test_rtf_footer():
+    t = u''
+    result = format_rtf(t)
+    expected = ''
+    msg = (u"RTF documents are expected to end with '{expected}'\n"
+           u"\t\tEnds instead with '{result}'\n"
+           u"\t(WARNING: Partial Output of Result!)".format(
+               expected=_escape(expected),
+               result=_escape(result[-len(expected):])))
+    assert result.endswith(expected+foot), msg
+
+
+def test_ascii_characters():
+    t = u'a b c d ~'
+    result = format_rtf(t)
+    expected = (r'a b c d ~')
+    msg = _build_message(t=t, result=result, expected=expected)
+    assert result.endswith(expected+foot), msg
+
+
+def test_escape_characters():
+    t = u'\\ {{'
+    result = format_rtf(t)
+    expected = r'\\ \{\{'
+    msg = _build_message(t=t, result=result, expected=expected)
+    assert result.endswith(expected+foot), msg
+
+
+def test_single_characters():
+    t = u'â € ¤ каждой'
+    result = format_rtf(t)
+    expected = (r'{\u226} {\u8364} {\u164} '
+                r'{\u1082}{\u1072}{\u1078}{\u1076}{\u1086}{\u1081}')
+    msg = _build_message(t=t, result=result, expected=expected)
+    assert result.endswith(expected+foot), msg
+
+
+def test_double_characters():
+    t = u'က 힣 ↕ ↕︎ 鼖'
+    result = format_rtf(t)
+    expected = (r'{\u4096} {\u55203} {\u8597} '
+                r'{\u8597}{\u65038} {\u55422}{\u56859}')
+    msg = _build_message(t=t, result=result, expected=expected)
+    assert result.endswith(expected+foot), msg
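
The same header/footer framing can be checked through the public highlight()
API instead of the local format_rtf() helper; a sketch:

    from pygments import highlight
    from pygments.formatters import RtfFormatter
    from pygments.lexers.special import TextLexer

    rtf = highlight(u'a b c d ~', TextLexer(), RtfFormatter())
    assert rtf.startswith(r'{\rtf1\ansi\uc0')  # header asserted above
    assert rtf.endswith('\\par\n}')            # the module-level foot
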
diff --git a/tests/test_ruby.py b/tests/test_ruby.py
index 45a7746..a6da4bf 100644
--- a/tests/test_ruby.py
+++ b/tests/test_ruby.py
@@ -7,139 +7,143 @@
     :license: BSD, see LICENSE for details.
 """
 
-import unittest
+import pytest
 
 from pygments.token import Operator, Number, Text, Token
 from pygments.lexers import RubyLexer
 
 
-class RubyTest(unittest.TestCase):
-
-    def setUp(self):
-        self.lexer = RubyLexer()
-        self.maxDiff = None
-
-    def testRangeSyntax1(self):
-        fragment = u'1..3\n'
-        tokens = [
-            (Number.Integer, u'1'),
-            (Operator, u'..'),
-            (Number.Integer, u'3'),
-            (Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def testRangeSyntax2(self):
-        fragment = u'1...3\n'
-        tokens = [
-            (Number.Integer, u'1'),
-            (Operator, u'...'),
-            (Number.Integer, u'3'),
-            (Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def testRangeSyntax3(self):
-        fragment = u'1 .. 3\n'
-        tokens = [
-            (Number.Integer, u'1'),
-            (Text, u' '),
-            (Operator, u'..'),
-            (Text, u' '),
-            (Number.Integer, u'3'),
-            (Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def testInterpolationNestedCurly(self):
-        fragment = (
-            u'"A#{ (3..5).group_by { |x| x/2}.map '
-            u'do |k,v| "#{k}" end.join }" + "Z"\n')
-
-        tokens = [
-            (Token.Literal.String.Double, u'"'),
-            (Token.Literal.String.Double, u'A'),
-            (Token.Literal.String.Interpol, u'#{'),
-            (Token.Text, u' '),
-            (Token.Punctuation, u'('),
-            (Token.Literal.Number.Integer, u'3'),
-            (Token.Operator, u'..'),
-            (Token.Literal.Number.Integer, u'5'),
-            (Token.Punctuation, u')'),
-            (Token.Operator, u'.'),
-            (Token.Name, u'group_by'),
-            (Token.Text, u' '),
-            (Token.Literal.String.Interpol, u'{'),
-            (Token.Text, u' '),
-            (Token.Operator, u'|'),
-            (Token.Name, u'x'),
-            (Token.Operator, u'|'),
-            (Token.Text, u' '),
-            (Token.Name, u'x'),
-            (Token.Operator, u'/'),
-            (Token.Literal.Number.Integer, u'2'),
-            (Token.Literal.String.Interpol, u'}'),
-            (Token.Operator, u'.'),
-            (Token.Name, u'map'),
-            (Token.Text, u' '),
-            (Token.Keyword, u'do'),
-            (Token.Text, u' '),
-            (Token.Operator, u'|'),
-            (Token.Name, u'k'),
-            (Token.Punctuation, u','),
-            (Token.Name, u'v'),
-            (Token.Operator, u'|'),
-            (Token.Text, u' '),
-            (Token.Literal.String.Double, u'"'),
-            (Token.Literal.String.Interpol, u'#{'),
-            (Token.Name, u'k'),
-            (Token.Literal.String.Interpol, u'}'),
-            (Token.Literal.String.Double, u'"'),
-            (Token.Text, u' '),
-            (Token.Keyword, u'end'),
-            (Token.Operator, u'.'),
-            (Token.Name, u'join'),
-            (Token.Text, u' '),
-            (Token.Literal.String.Interpol, u'}'),
-            (Token.Literal.String.Double, u'"'),
-            (Token.Text, u' '),
-            (Token.Operator, u'+'),
-            (Token.Text, u' '),
-            (Token.Literal.String.Double, u'"'),
-            (Token.Literal.String.Double, u'Z'),
-            (Token.Literal.String.Double, u'"'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def testOperatorMethods(self):
-        fragment = u'x.==4\n'
-        tokens = [
-            (Token.Name, u'x'),
-            (Token.Operator, u'.'),
-            (Token.Name.Operator, u'=='),
-            (Token.Literal.Number.Integer, u'4'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def testEscapedBracestring(self):
-        fragment = u'str.gsub(%r{\\\\\\\\}, "/")\n'
-        tokens = [
-            (Token.Name, u'str'),
-            (Token.Operator, u'.'),
-            (Token.Name, u'gsub'),
-            (Token.Punctuation, u'('),
-            (Token.Literal.String.Regex, u'%r{'),
-            (Token.Literal.String.Regex, u'\\\\'),
-            (Token.Literal.String.Regex, u'\\\\'),
-            (Token.Literal.String.Regex, u'}'),
-            (Token.Punctuation, u','),
-            (Token.Text, u' '),
-            (Token.Literal.String.Double, u'"'),
-            (Token.Literal.String.Double, u'/'),
-            (Token.Literal.String.Double, u'"'),
-            (Token.Punctuation, u')'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+@pytest.fixture(scope='module')
+def lexer():
+    yield RubyLexer()
+
+
+def test_range_syntax1(lexer):
+    fragment = u'1..3\n'
+    tokens = [
+        (Number.Integer, u'1'),
+        (Operator, u'..'),
+        (Number.Integer, u'3'),
+        (Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_range_syntax2(lexer):
+    fragment = u'1...3\n'
+    tokens = [
+        (Number.Integer, u'1'),
+        (Operator, u'...'),
+        (Number.Integer, u'3'),
+        (Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_range_syntax3(lexer):
+    fragment = u'1 .. 3\n'
+    tokens = [
+        (Number.Integer, u'1'),
+        (Text, u' '),
+        (Operator, u'..'),
+        (Text, u' '),
+        (Number.Integer, u'3'),
+        (Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_interpolation_nested_curly(lexer):
+    fragment = (
+        u'"A#{ (3..5).group_by { |x| x/2}.map '
+        u'do |k,v| "#{k}" end.join }" + "Z"\n')
+
+    tokens = [
+        (Token.Literal.String.Double, u'"'),
+        (Token.Literal.String.Double, u'A'),
+        (Token.Literal.String.Interpol, u'#{'),
+        (Token.Text, u' '),
+        (Token.Punctuation, u'('),
+        (Token.Literal.Number.Integer, u'3'),
+        (Token.Operator, u'..'),
+        (Token.Literal.Number.Integer, u'5'),
+        (Token.Punctuation, u')'),
+        (Token.Operator, u'.'),
+        (Token.Name, u'group_by'),
+        (Token.Text, u' '),
+        (Token.Literal.String.Interpol, u'{'),
+        (Token.Text, u' '),
+        (Token.Operator, u'|'),
+        (Token.Name, u'x'),
+        (Token.Operator, u'|'),
+        (Token.Text, u' '),
+        (Token.Name, u'x'),
+        (Token.Operator, u'/'),
+        (Token.Literal.Number.Integer, u'2'),
+        (Token.Literal.String.Interpol, u'}'),
+        (Token.Operator, u'.'),
+        (Token.Name, u'map'),
+        (Token.Text, u' '),
+        (Token.Keyword, u'do'),
+        (Token.Text, u' '),
+        (Token.Operator, u'|'),
+        (Token.Name, u'k'),
+        (Token.Punctuation, u','),
+        (Token.Name, u'v'),
+        (Token.Operator, u'|'),
+        (Token.Text, u' '),
+        (Token.Literal.String.Double, u'"'),
+        (Token.Literal.String.Interpol, u'#{'),
+        (Token.Name, u'k'),
+        (Token.Literal.String.Interpol, u'}'),
+        (Token.Literal.String.Double, u'"'),
+        (Token.Text, u' '),
+        (Token.Keyword, u'end'),
+        (Token.Operator, u'.'),
+        (Token.Name, u'join'),
+        (Token.Text, u' '),
+        (Token.Literal.String.Interpol, u'}'),
+        (Token.Literal.String.Double, u'"'),
+        (Token.Text, u' '),
+        (Token.Operator, u'+'),
+        (Token.Text, u' '),
+        (Token.Literal.String.Double, u'"'),
+        (Token.Literal.String.Double, u'Z'),
+        (Token.Literal.String.Double, u'"'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_operator_methods(lexer):
+    fragment = u'x.==4\n'
+    tokens = [
+        (Token.Name, u'x'),
+        (Token.Operator, u'.'),
+        (Token.Name.Operator, u'=='),
+        (Token.Literal.Number.Integer, u'4'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_escaped_bracestring(lexer):
+    fragment = u'str.gsub(%r{\\\\\\\\}, "/")\n'
+    tokens = [
+        (Token.Name, u'str'),
+        (Token.Operator, u'.'),
+        (Token.Name, u'gsub'),
+        (Token.Punctuation, u'('),
+        (Token.Literal.String.Regex, u'%r{'),
+        (Token.Literal.String.Regex, u'\\\\'),
+        (Token.Literal.String.Regex, u'\\\\'),
+        (Token.Literal.String.Regex, u'}'),
+        (Token.Punctuation, u','),
+        (Token.Text, u' '),
+        (Token.Literal.String.Double, u'"'),
+        (Token.Literal.String.Double, u'/'),
+        (Token.Literal.String.Double, u'"'),
+        (Token.Punctuation, u')'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
diff --git a/tests/test_shell.py b/tests/test_shell.py
index 6b24eb4..64918a2 100644
--- a/tests/test_shell.py
+++ b/tests/test_shell.py
@@ -7,153 +7,175 @@
     :license: BSD, see LICENSE for details.
 """
 
-import unittest
+import pytest
 
 from pygments.token import Token
 from pygments.lexers import BashLexer, BashSessionLexer, MSDOSSessionLexer
 
 
-class BashTest(unittest.TestCase):
-
-    def setUp(self):
-        self.lexer = BashLexer()
-        self.maxDiff = None
-
-    def testCurlyNoEscapeAndQuotes(self):
-        fragment = u'echo "${a//["b"]/}"\n'
-        tokens = [
-            (Token.Name.Builtin, u'echo'),
-            (Token.Text, u' '),
-            (Token.Literal.String.Double, u'"'),
-            (Token.String.Interpol, u'${'),
-            (Token.Name.Variable, u'a'),
-            (Token.Punctuation, u'//['),
-            (Token.Literal.String.Double, u'"b"'),
-            (Token.Punctuation, u']/'),
-            (Token.String.Interpol, u'}'),
-            (Token.Literal.String.Double, u'"'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def testCurlyWithEscape(self):
-        fragment = u'echo ${a//[\\"]/}\n'
-        tokens = [
-            (Token.Name.Builtin, u'echo'),
-            (Token.Text, u' '),
-            (Token.String.Interpol, u'${'),
-            (Token.Name.Variable, u'a'),
-            (Token.Punctuation, u'//['),
-            (Token.Literal.String.Escape, u'\\"'),
-            (Token.Punctuation, u']/'),
-            (Token.String.Interpol, u'}'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def testParsedSingle(self):
-        fragment = u"a=$'abc\\''\n"
-        tokens = [
-            (Token.Name.Variable, u'a'),
-            (Token.Operator, u'='),
-            (Token.Literal.String.Single, u"$'abc\\''"),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def testShortVariableNames(self):
-        fragment = u'x="$"\ny="$_"\nz="$abc"\n'
-        tokens = [
-            # single lone $
-            (Token.Name.Variable, u'x'),
-            (Token.Operator, u'='),
-            (Token.Literal.String.Double, u'"'),
-            (Token.Text, u'$'),
-            (Token.Literal.String.Double, u'"'),
-            (Token.Text, u'\n'),
-            # single letter shell var
-            (Token.Name.Variable, u'y'),
-            (Token.Operator, u'='),
-            (Token.Literal.String.Double, u'"'),
-            (Token.Name.Variable, u'$_'),
-            (Token.Literal.String.Double, u'"'),
-            (Token.Text, u'\n'),
-            # multi-letter user var
-            (Token.Name.Variable, u'z'),
-            (Token.Operator, u'='),
-            (Token.Literal.String.Double, u'"'),
-            (Token.Name.Variable, u'$abc'),
-            (Token.Literal.String.Double, u'"'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def testArrayNums(self):
-        fragment = u'a=(1 2 3)\n'
-        tokens = [
-            (Token.Name.Variable, u'a'),
-            (Token.Operator, u'='),
-            (Token.Operator, u'('),
-            (Token.Literal.Number, u'1'),
-            (Token.Text, u' '),
-            (Token.Literal.Number, u'2'),
-            (Token.Text, u' '),
-            (Token.Literal.Number, u'3'),
-            (Token.Operator, u')'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def testEndOfLineNums(self):
-        fragment = u'a=1\nb=2 # comment\n'
-        tokens = [
-            (Token.Name.Variable, u'a'),
-            (Token.Operator, u'='),
-            (Token.Literal.Number, u'1'),
-            (Token.Text, u'\n'),
-            (Token.Name.Variable, u'b'),
-            (Token.Operator, u'='),
-            (Token.Literal.Number, u'2'),
-            (Token.Text, u' '),
-            (Token.Comment.Single, u'# comment\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-class BashSessionTest(unittest.TestCase):
-
-    def setUp(self):
-        self.lexer = BashSessionLexer()
-        self.maxDiff = None
-
-    def testNeedsName(self):
-        fragment = u'$ echo \\\nhi\nhi\n'
-        tokens = [
-            (Token.Text, u''),
-            (Token.Generic.Prompt, u'$'),
-            (Token.Text, u' '),
-            (Token.Name.Builtin, u'echo'),
-            (Token.Text, u' '),
-            (Token.Literal.String.Escape, u'\\\n'),
-            (Token.Text, u'hi'),
-            (Token.Text, u'\n'),
-            (Token.Generic.Output, u'hi\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-class MSDOSSessionTest(unittest.TestCase):
-
-    def setUp(self):
-        self.lexer = MSDOSSessionLexer()
-
-    def testGtOnlyPrompt(self):
-        fragment = u'> py\nhi\n'
-        tokens = [
-            (Token.Text, u''),
-            (Token.Generic.Prompt, u'>'),
-            (Token.Text, u' '),
-            (Token.Text, u'py'),
-            (Token.Text, u''),
-            (Token.Text, u'\n'),
-            (Token.Generic.Output, u'hi\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+@pytest.fixture(scope='module')
+def lexer_bash():
+    yield BashLexer()
+
+
+@pytest.fixture(scope='module')
+def lexer_session():
+    yield BashSessionLexer()
+
+
+@pytest.fixture(scope='module')
+def lexer_msdos():
+    yield MSDOSSessionLexer()
+
+
+def test_curly_no_escape_and_quotes(lexer_bash):
+    fragment = u'echo "${a//["b"]/}"\n'
+    tokens = [
+        (Token.Name.Builtin, u'echo'),
+        (Token.Text, u' '),
+        (Token.Literal.String.Double, u'"'),
+        (Token.String.Interpol, u'${'),
+        (Token.Name.Variable, u'a'),
+        (Token.Punctuation, u'//['),
+        (Token.Literal.String.Double, u'"b"'),
+        (Token.Punctuation, u']/'),
+        (Token.String.Interpol, u'}'),
+        (Token.Literal.String.Double, u'"'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer_bash.get_tokens(fragment)) == tokens
+
+
+def test_curly_with_escape(lexer_bash):
+    fragment = u'echo ${a//[\\"]/}\n'
+    tokens = [
+        (Token.Name.Builtin, u'echo'),
+        (Token.Text, u' '),
+        (Token.String.Interpol, u'${'),
+        (Token.Name.Variable, u'a'),
+        (Token.Punctuation, u'//['),
+        (Token.Literal.String.Escape, u'\\"'),
+        (Token.Punctuation, u']/'),
+        (Token.String.Interpol, u'}'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer_bash.get_tokens(fragment)) == tokens
+
+
+def test_parsed_single(lexer_bash):
+    fragment = u"a=$'abc\\''\n"
+    tokens = [
+        (Token.Name.Variable, u'a'),
+        (Token.Operator, u'='),
+        (Token.Literal.String.Single, u"$'abc\\''"),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer_bash.get_tokens(fragment)) == tokens
+
+
+def test_short_variable_names(lexer_bash):
+    fragment = u'x="$"\ny="$_"\nz="$abc"\n'
+    tokens = [
+        # single lone $
+        (Token.Name.Variable, u'x'),
+        (Token.Operator, u'='),
+        (Token.Literal.String.Double, u'"'),
+        (Token.Text, u'$'),
+        (Token.Literal.String.Double, u'"'),
+        (Token.Text, u'\n'),
+        # single letter shell var
+        (Token.Name.Variable, u'y'),
+        (Token.Operator, u'='),
+        (Token.Literal.String.Double, u'"'),
+        (Token.Name.Variable, u'$_'),
+        (Token.Literal.String.Double, u'"'),
+        (Token.Text, u'\n'),
+        # multi-letter user var
+        (Token.Name.Variable, u'z'),
+        (Token.Operator, u'='),
+        (Token.Literal.String.Double, u'"'),
+        (Token.Name.Variable, u'$abc'),
+        (Token.Literal.String.Double, u'"'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer_bash.get_tokens(fragment)) == tokens
+
+
+def test_array_nums(lexer_bash):
+    fragment = u'a=(1 2 3)\n'
+    tokens = [
+        (Token.Name.Variable, u'a'),
+        (Token.Operator, u'='),
+        (Token.Operator, u'('),
+        (Token.Literal.Number, u'1'),
+        (Token.Text, u' '),
+        (Token.Literal.Number, u'2'),
+        (Token.Text, u' '),
+        (Token.Literal.Number, u'3'),
+        (Token.Operator, u')'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer_bash.get_tokens(fragment)) == tokens
+
+
+def test_end_of_line_nums(lexer_bash):
+    fragment = u'a=1\nb=2 # comment\n'
+    tokens = [
+        (Token.Name.Variable, u'a'),
+        (Token.Operator, u'='),
+        (Token.Literal.Number, u'1'),
+        (Token.Text, u'\n'),
+        (Token.Name.Variable, u'b'),
+        (Token.Operator, u'='),
+        (Token.Literal.Number, u'2'),
+        (Token.Text, u' '),
+        (Token.Comment.Single, u'# comment\n'),
+    ]
+    assert list(lexer_bash.get_tokens(fragment)) == tokens
+
+
+def test_newline_in_echo(lexer_session):
+    fragment = u'$ echo \\\nhi\nhi\n'
+    tokens = [
+        (Token.Text, u''),
+        (Token.Generic.Prompt, u'$'),
+        (Token.Text, u' '),
+        (Token.Name.Builtin, u'echo'),
+        (Token.Text, u' '),
+        (Token.Literal.String.Escape, u'\\\n'),
+        (Token.Text, u'hi'),
+        (Token.Text, u'\n'),
+        (Token.Generic.Output, u'hi\n'),
+    ]
+    assert list(lexer_session.get_tokens(fragment)) == tokens
+
+
+def test_msdos_gt_only(lexer_msdos):
+    fragment = u'> py\nhi\n'
+    tokens = [
+        (Token.Text, u''),
+        (Token.Generic.Prompt, u'>'),
+        (Token.Text, u' '),
+        (Token.Text, u'py'),
+        (Token.Text, u''),
+        (Token.Text, u'\n'),
+        (Token.Generic.Output, u'hi\n'),
+    ]
+    assert list(lexer_msdos.get_tokens(fragment)) == tokens
+
+
+def test_virtualenv(lexer_session):
+    fragment = u'(env) [~/project]$ foo -h\n'
+    tokens = [
+        (Token.Text, u''),
+        (Token.Generic.Prompt.VirtualEnv, u'(env)'),
+        (Token.Text, u''),
+        (Token.Text, u' '),
+        (Token.Text, u''),
+        (Token.Generic.Prompt, u'[~/project]$'),
+        (Token.Text, u' '),
+        (Token.Text, u'foo'),
+        (Token.Text, u' '),
+        (Token.Text, u'-h'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer_session.get_tokens(fragment)) == tokens
diff --git a/tests/test_smarty.py b/tests/test_smarty.py
index fb15f7f..2d17255 100644
--- a/tests/test_smarty.py
+++ b/tests/test_smarty.py
@@ -1,40 +1,39 @@
 # -*- coding: utf-8 -*-
 """
     Basic SmartyLexer Test
-    ~~~~~~~~~~~~~~~~~~~~
+    ~~~~~~~~~~~~~~~~~~~~~~
 
     :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
-import unittest
+import pytest
 
-from pygments.token import Operator, Number, Text, Token
+from pygments.token import Token
 from pygments.lexers import SmartyLexer
 
 
-class SmartyTest(unittest.TestCase):
-
-    def setUp(self):
-        self.lexer = SmartyLexer()
-
-    def testNestedCurly(self):
-        fragment = u'{templateFunction param={anotherFunction} param2=$something}\n'
-        tokens = [
-            (Token.Comment.Preproc, u'{'),
-            (Token.Name.Function, u'templateFunction'),
-            (Token.Text, u' '),
-            (Token.Name.Attribute, u'param'),
-            (Token.Operator, u'='),
-            (Token.Comment.Preproc, u'{'),
-            (Token.Name.Attribute, u'anotherFunction'),
-            (Token.Comment.Preproc, u'}'),
-            (Token.Text, u' '),
-            (Token.Name.Attribute, u'param2'),
-            (Token.Operator, u'='),
-            (Token.Name.Variable, u'$something'),
-            (Token.Comment.Preproc, u'}'),
-            (Token.Other, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
+@pytest.fixture(scope='module')
+def lexer():
+    yield SmartyLexer()
+
+
+def test_nested_curly(lexer):
+    fragment = u'{templateFunction param={anotherFunction} param2=$something}\n'
+    tokens = [
+        (Token.Comment.Preproc, u'{'),
+        (Token.Name.Function, u'templateFunction'),
+        (Token.Text, u' '),
+        (Token.Name.Attribute, u'param'),
+        (Token.Operator, u'='),
+        (Token.Comment.Preproc, u'{'),
+        (Token.Name.Attribute, u'anotherFunction'),
+        (Token.Comment.Preproc, u'}'),
+        (Token.Text, u' '),
+        (Token.Name.Attribute, u'param2'),
+        (Token.Operator, u'='),
+        (Token.Name.Variable, u'$something'),
+        (Token.Comment.Preproc, u'}'),
+        (Token.Other, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
diff --git a/tests/test_sql.py b/tests/test_sql.py
index 6be3400..efd63be 100644
--- a/tests/test_sql.py
+++ b/tests/test_sql.py
@@ -3,10 +3,11 @@
     Pygments SQL lexers tests
     ~~~~~~~~~~~~~~~~~~~~~~~~~
 
-    :copyright: Copyright 2006-2016 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
-import unittest
+
+import pytest
 
 from pygments.lexers.sql import name_between_bracket_re, \
     name_between_backtick_re, tsql_go_re, tsql_declare_re, \
@@ -15,104 +16,102 @@ from pygments.lexers.sql import name_between_bracket_re, \
 from pygments.token import Comment, Name, Number, Punctuation, Whitespace
 
 
-class TransactSqlLexerTest(unittest.TestCase):
-
-    def setUp(self):
-        self.lexer = TransactSqlLexer()
-
-    def _assertAreTokensOfType(self, examples, expected_token_type):
-        for test_number, example in enumerate(examples.split(), 1):
-            token_count = 0
-            for token_type, token_value in self.lexer.get_tokens(example):
-                if token_type != Whitespace:
-                    token_count += 1
-                    self.assertEqual(
-                        token_type, expected_token_type,
-                        'token_type #%d for %s is be %s but must be %s' %
-                        (test_number, token_value, token_type, expected_token_type))
-            self.assertEqual(
-                token_count, 1,
-                '%s must yield exactly 1 token instead of %d' %
-                (example, token_count))
-
-    def _assertTokensMatch(self, text, expected_tokens_without_trailing_newline):
-        actual_tokens = tuple(self.lexer.get_tokens(text))
-        if (len(actual_tokens) >= 1) and (actual_tokens[-1] == (Whitespace, '\n')):
-            actual_tokens = tuple(actual_tokens[:-1])
-        self.assertEqual(
-            expected_tokens_without_trailing_newline, actual_tokens,
-            'text must yield expected tokens: %s' % text)
-
-    def test_can_lex_float(self):
-        self._assertAreTokensOfType(
-            '1. 1.e1 .1 1.2 1.2e3 1.2e+3 1.2e-3 1e2', Number.Float)
-        self._assertTokensMatch(
-            '1e2.1e2',
-            ((Number.Float, '1e2'), (Number.Float, '.1e2'))
+@pytest.fixture(scope='module')
+def lexer():
+    yield TransactSqlLexer()
+
+
+def _assert_are_tokens_of_type(lexer, examples, expected_token_type):
+    for test_number, example in enumerate(examples.split(), 1):
+        token_count = 0
+        for token_type, token_value in lexer.get_tokens(example):
+            if token_type != Whitespace:
+                token_count += 1
+                assert token_type == expected_token_type, \
+                    'token_type #%d for %s is %s but must be %s' % \
+                    (test_number, token_value, token_type, expected_token_type)
+        assert token_count == 1, \
+            '%s must yield exactly 1 token instead of %d' % \
+            (example, token_count)
+
+
+def _assert_tokens_match(lexer, text, expected_tokens_without_trailing_newline):
+    actual_tokens = tuple(lexer.get_tokens(text))
+    if (len(actual_tokens) >= 1) and (actual_tokens[-1] == (Whitespace, '\n')):
+        actual_tokens = tuple(actual_tokens[:-1])
+    assert expected_tokens_without_trailing_newline == actual_tokens, \
+        'text must yield expected tokens: %s' % text
+
+
+def test_can_lex_float(lexer):
+    _assert_are_tokens_of_type(lexer,
+                               '1. 1.e1 .1 1.2 1.2e3 1.2e+3 1.2e-3 1e2',
+                               Number.Float)
+    _assert_tokens_match(lexer,
+                         '1e2.1e2',
+                         ((Number.Float, '1e2'), (Number.Float, '.1e2')))
+
+
+def test_can_reject_almost_float(lexer):
+    _assert_tokens_match(lexer, '.e1', ((Punctuation, '.'), (Name, 'e1')))
+
+
+def test_can_lex_integer(lexer):
+    _assert_are_tokens_of_type(lexer, '1 23 456', Number.Integer)
+
+
+def test_can_lex_names(lexer):
+    _assert_are_tokens_of_type(lexer,
+                               u'thingy thingy123 _thingy _ _123 Ähnliches Müll #temp1 ##temp2',
+                               Name)
+
+
+def test_can_lex_comments(lexer):
+    _assert_tokens_match(lexer, '--\n', ((Comment.Single, '--\n'),))
+    _assert_tokens_match(lexer, '/**/', (
+        (Comment.Multiline, '/*'), (Comment.Multiline, '*/')
+    ))
+    _assert_tokens_match(lexer, '/*/**/*/', (
+        (Comment.Multiline, '/*'),
+        (Comment.Multiline, '/*'),
+        (Comment.Multiline, '*/'),
+        (Comment.Multiline, '*/'),
+    ))
+
+
+def test_can_match_analyze_text_res():
+    assert ['`a`', '`bc`'] == \
+        name_between_backtick_re.findall('select `a`, `bc` from some')
+    assert ['[a]', '[bc]'] == \
+        name_between_bracket_re.findall('select [a], [bc] from some')
+    assert tsql_declare_re.search('--\nDeClaRe @some int;')
+    assert tsql_go_re.search('select 1\ngo\n--')
+    assert tsql_variable_re.search('create procedure dbo.usp_x @a int, @b int')
+
+
+def test_can_analyze_text():
+    mysql_lexer = MySqlLexer()
+    sql_lexer = SqlLexer()
+    tsql_lexer = TransactSqlLexer()
+    code_to_expected_lexer_map = {
+        'select `a`, `bc` from some': mysql_lexer,
+        'select a, bc from some': sql_lexer,
+        'select [a], [bc] from some': tsql_lexer,
+        '-- `a`, `bc`\nselect [a], [bc] from some': tsql_lexer,
+        '-- `a`, `bc`\nselect [a], [bc] from some; go': tsql_lexer,
+    }
+    sql_lexers = set(code_to_expected_lexer_map.values())
+    for code, expected_lexer in code_to_expected_lexer_map.items():
+        ratings_and_lexers = list((lexer.analyse_text(code), lexer.name) for lexer in sql_lexers)
+        best_rating, best_lexer_name = sorted(ratings_and_lexers, reverse=True)[0]
+        expected_rating = expected_lexer.analyse_text(code)
+        message = (
+            'lexer must be %s (rating %.2f) instead of '
+            '%s (rating %.2f) for analyse_text() on code:\n%s') % (
+            expected_lexer.name,
+            expected_rating,
+            best_lexer_name,
+            best_rating,
+            code
         )
-
-    def test_can_reject_almost_float(self):
-        self._assertTokensMatch(
-            '.e1',
-            ((Punctuation, '.'), (Name, 'e1')))
-
-    def test_can_lex_integer(self):
-        self._assertAreTokensOfType(
-            '1 23 456', Number.Integer)
-
-    def test_can_lex_names(self):
-        self._assertAreTokensOfType(
-            u'thingy thingy123 _thingy _ _123 Ähnliches Müll #temp1 ##temp2', Name)
-
-    def test_can_lex_comments(self):
-        self._assertTokensMatch('--\n', ((Comment.Single, '--\n'),))
-        self._assertTokensMatch('/**/', (
-            (Comment.Multiline, '/*'), (Comment.Multiline, '*/')
-        ))
-        self._assertTokensMatch('/*/**/*/', (
-            (Comment.Multiline, '/*'),
-            (Comment.Multiline, '/*'),
-            (Comment.Multiline, '*/'),
-            (Comment.Multiline, '*/'),
-        ))
-
-
-class SqlAnalyzeTextTest(unittest.TestCase):
-    def test_can_match_analyze_text_res(self):
-        self.assertEqual(['`a`', '`bc`'],
-            name_between_backtick_re.findall('select `a`, `bc` from some'))
-        self.assertEqual(['[a]', '[bc]'],
-            name_between_bracket_re.findall('select [a], [bc] from some'))
-        self.assertTrue(tsql_declare_re.search('--\nDeClaRe @some int;'))
-        self.assertTrue(tsql_go_re.search('select 1\ngo\n--'))
-        self.assertTrue(tsql_variable_re.search(
-            'create procedure dbo.usp_x @a int, @b int'))
-
-    def test_can_analyze_text(self):
-        mysql_lexer = MySqlLexer()
-        sql_lexer = SqlLexer()
-        tsql_lexer = TransactSqlLexer()
-        code_to_expected_lexer_map = {
-            'select `a`, `bc` from some': mysql_lexer,
-            'select a, bc from some': sql_lexer,
-            'select [a], [bc] from some': tsql_lexer,
-            '-- `a`, `bc`\nselect [a], [bc] from some': tsql_lexer,
-            '-- `a`, `bc`\nselect [a], [bc] from some; go': tsql_lexer,
-        }
-        sql_lexers = set(code_to_expected_lexer_map.values())
-        for code, expected_lexer in code_to_expected_lexer_map.items():
-            ratings_and_lexers = list((lexer.analyse_text(code), lexer.name) for lexer in sql_lexers)
-            best_rating, best_lexer_name  = sorted(ratings_and_lexers, reverse=True)[0]
-            expected_rating = expected_lexer.analyse_text(code)
-            message = (
-                'lexer must be %s (rating %.2f) instead of '
-                '%s (rating %.2f) for analyse_text() on code:\n%s') % (
-                expected_lexer.name,
-                expected_rating,
-                best_lexer_name,
-                best_rating,
-                code
-            )
-            self.assertEqual(
-                expected_lexer.name, best_lexer_name, message
-            )
+        assert expected_lexer.name == best_lexer_name, message
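The analyse_text() ratings compared above are the same scores Pygments'
guess_lexer() consults when it has to pick a lexer for plain text. A minimal
sketch of querying them directly, reusing the lexers and sample input from the
test above:

    from pygments.lexers import MySqlLexer, SqlLexer, TransactSqlLexer

    code = 'select [a], [bc] from some'
    for lexer in (MySqlLexer(), SqlLexer(), TransactSqlLexer()):
        # analyse_text() returns a float between 0.0 and 1.0;
        # the highest score wins when guessing.
        print('%-20s %.2f' % (lexer.name, lexer.analyse_text(code)))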
diff --git a/tests/test_string_asserts.py b/tests/test_string_asserts.py
deleted file mode 100644
index 737ba20..0000000
--- a/tests/test_string_asserts.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-    Pygments string assert utility tests
-    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
-    :license: BSD, see LICENSE for details.
-"""
-
-import unittest
-from string_asserts import StringTests
-
-class TestStringTests(StringTests, unittest.TestCase):
-
-    def test_startswith_correct(self):
-        self.assertStartsWith("AAA", "A")
-
-    # @unittest.expectedFailure not supported by nose
-    def test_startswith_incorrect(self):
-        self.assertRaises(AssertionError, self.assertStartsWith, "AAA", "B")
-
-    # @unittest.expectedFailure not supported by nose
-    def test_startswith_short(self):
-        self.assertRaises(AssertionError, self.assertStartsWith, "A", "AA")
-
-    def test_endswith_correct(self):
-        self.assertEndsWith("AAA", "A")
-
-    # @unittest.expectedFailure not supported by nose
-    def test_endswith_incorrect(self):
-        self.assertRaises(AssertionError, self.assertEndsWith, "AAA", "B")
-
-    # @unittest.expectedFailure not supported by nose
-    def test_endswith_short(self):
-        self.assertRaises(AssertionError, self.assertEndsWith, "A", "AA")
diff --git a/tests/test_terminal_formatter.py b/tests/test_terminal_formatter.py
index e5a1343..98eab58 100644
--- a/tests/test_terminal_formatter.py
+++ b/tests/test_terminal_formatter.py
@@ -9,7 +9,6 @@
 
 from __future__ import print_function
 
-import unittest
 import re
 
 from pygments.util import StringIO
@@ -37,26 +36,26 @@ def strip_ansi(x):
     return ANSI_RE.sub('', x)
 
 
-class TerminalFormatterTest(unittest.TestCase):
-    def test_reasonable_output(self):
-        out = StringIO()
-        TerminalFormatter().format(DEMO_TOKENS, out)
-        plain = strip_ansi(out.getvalue())
-        self.assertEqual(DEMO_TEXT.count('\n'), plain.count('\n'))
-        print(repr(plain))
+def test_reasonable_output():
+    out = StringIO()
+    TerminalFormatter().format(DEMO_TOKENS, out)
+    plain = strip_ansi(out.getvalue())
+    assert DEMO_TEXT.count('\n') == plain.count('\n')
+    print(repr(plain))
 
-        for a, b in zip(DEMO_TEXT.splitlines(), plain.splitlines()):
-            self.assertEqual(a, b)
+    for a, b in zip(DEMO_TEXT.splitlines(), plain.splitlines()):
+        assert a == b
 
-    def test_reasonable_output_lineno(self):
-        out = StringIO()
-        TerminalFormatter(linenos=True).format(DEMO_TOKENS, out)
-        plain = strip_ansi(out.getvalue())
-        self.assertEqual(DEMO_TEXT.count('\n') + 1, plain.count('\n'))
-        print(repr(plain))
 
-        for a, b in zip(DEMO_TEXT.splitlines(), plain.splitlines()):
-            self.assertTrue(a in b)
+def test_reasonable_output_lineno():
+    out = StringIO()
+    TerminalFormatter(linenos=True).format(DEMO_TOKENS, out)
+    plain = strip_ansi(out.getvalue())
+    assert DEMO_TEXT.count('\n') + 1 == plain.count('\n')
+    print(repr(plain))
+
+    for a, b in zip(DEMO_TEXT.splitlines(), plain.splitlines()):
+        assert a in b
 
 
 class MyStyle(Style):
@@ -68,8 +67,7 @@ class MyStyle(Style):
     }
 
 
-class Terminal256FormatterTest(unittest.TestCase):
-    code = '''
+CODE = '''
 # this should be a comment
 print("Hello World")
 async def function(a,b,c, *d, **kwarg:Bool)->Bool:
@@ -78,25 +76,28 @@ async def function(a,b,c, *d, **kwarg:Bool)->Bool:
 
 '''
 
-    def test_style_html(self):
-        style = HtmlFormatter(style=MyStyle).get_style_defs()
-        self.assertTrue('#555555' in style,
-                        "ansigray for comment not html css style")
-
-    def test_others_work(self):
-        """check other formatters don't crash"""
-        highlight(self.code, Python3Lexer(), LatexFormatter(style=MyStyle))
-        highlight(self.code, Python3Lexer(), HtmlFormatter(style=MyStyle))
-
-    def test_256esc_seq(self):
-        """
-        test that a few escape sequences are actually used when using ansi<> color codes
-        """
-        def termtest(x):
-            return highlight(x, Python3Lexer(),
-                             Terminal256Formatter(style=MyStyle))
-
-        self.assertTrue('32;101' in termtest('0x123'))
-        self.assertTrue('92;42' in termtest('123'))
-        self.assertTrue('90' in termtest('#comment'))
-        self.assertTrue('94;41' in termtest('"String"'))
+
+def test_style_html():
+    style = HtmlFormatter(style=MyStyle).get_style_defs()
+    assert '#555555' in style, "ansigray for comment not in html css style"
+
+
+def test_others_work():
+    """Check other formatters don't crash."""
+    highlight(CODE, Python3Lexer(), LatexFormatter(style=MyStyle))
+    highlight(CODE, Python3Lexer(), HtmlFormatter(style=MyStyle))
+
+
+def test_256esc_seq():
+    """
+    Test that a few escape sequences are actually used when using ansi<> color
+    codes.
+    """
+    def termtest(x):
+        return highlight(x, Python3Lexer(),
+                         Terminal256Formatter(style=MyStyle))
+
+    assert '32;101' in termtest('0x123')
+    assert '92;42' in termtest('123')
+    assert '90' in termtest('#comment')
+    assert '94;41' in termtest('"String"')
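For context, these tests drive the public highlight() pipeline end to end. A
minimal sketch of producing 256-color terminal output the same way (sample
input assumed):

    from pygments import highlight
    from pygments.formatters import Terminal256Formatter
    from pygments.lexers import Python3Lexer

    # With the default style the output carries xterm palette sequences like
    # ESC[38;5;NNNm; a style built from ansi* color names (as MyStyle above)
    # maps to the plain 16-color codes the assertions grep for.
    ansi = highlight('print("Hello World")', Python3Lexer(),
                     Terminal256Formatter())
    print(ansi)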
diff --git a/tests/test_textfmts.py b/tests/test_textfmts.py
index 8a1b8ed..5f36900 100644
--- a/tests/test_textfmts.py
+++ b/tests/test_textfmts.py
@@ -7,35 +7,32 @@
     :license: BSD, see LICENSE for details.
 """
 
-import unittest
+import pytest
 
-from pygments.token import Operator, Number, Text, Token
+from pygments.token import Token
 from pygments.lexers.textfmts import HttpLexer
 
 
-class RubyTest(unittest.TestCase):
-
-    def setUp(self):
-        self.lexer = HttpLexer()
-        self.maxDiff = None
-
-    def testApplicationXml(self):
-        fragment = u'GET / HTTP/1.0\nContent-Type: application/xml\n\n<foo>\n'
-        tokens = [
-            (Token.Name.Tag, u'<foo'),
-            (Token.Name.Tag, u'>'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(
-            tokens, list(self.lexer.get_tokens(fragment))[-len(tokens):])
-
-    def testApplicationCalendarXml(self):
-        fragment = u'GET / HTTP/1.0\nContent-Type: application/calendar+xml\n\n<foo>\n'
-        tokens = [
-            (Token.Name.Tag, u'<foo'),
-            (Token.Name.Tag, u'>'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(
-            tokens, list(self.lexer.get_tokens(fragment))[-len(tokens):])
+@pytest.fixture(scope='module')
+def lexer():
+    yield HttpLexer()
 
+
+def test_application_xml(lexer):
+    fragment = u'GET / HTTP/1.0\nContent-Type: application/xml\n\n<foo>\n'
+    tokens = [
+        (Token.Name.Tag, u'<foo'),
+        (Token.Name.Tag, u'>'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment))[-len(tokens):] == tokens
+
+
+def test_application_calendar_xml(lexer):
+    fragment = u'GET / HTTP/1.0\nContent-Type: application/calendar+xml\n\n<foo>\n'
+    tokens = [
+        (Token.Name.Tag, u'<foo'),
+        (Token.Name.Tag, u'>'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment))[-len(tokens):] == tokens
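The module-scoped fixture above is instantiated once per module and injected
into every test that declares a lexer parameter. A hypothetical extra test in
the same style (the test name is invented here, and the Name.Attribute token
type is assumed from the HttpLexer header rules):

    def test_header_name(lexer):  # hypothetical; reuses the Token import above
        fragment = u'GET / HTTP/1.0\nContent-Type: text/plain\n\n'
        assert (Token.Name.Attribute, u'Content-Type') in lexer.get_tokens(fragment)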
diff --git a/tests/test_token.py b/tests/test_token.py
index fdbcabd..11e4d37 100644
--- a/tests/test_token.py
+++ b/tests/test_token.py
@@ -8,47 +8,45 @@
 """
 
 import copy
-import unittest
 
-from pygments import token
+import pytest
 
+from pygments import token
 
-class TokenTest(unittest.TestCase):
 
-    def test_tokentype(self):
-        e = self.assertEqual
+def test_tokentype():
+    t = token.String
+    assert t.split() == [token.Token, token.Literal, token.String]
+    assert t.__class__ is token._TokenType
 
-        t = token.String
 
-        e(t.split(), [token.Token, token.Literal, token.String])
+def test_functions():
+    assert token.is_token_subtype(token.String, token.String)
+    assert token.is_token_subtype(token.String, token.Literal)
+    assert not token.is_token_subtype(token.Literal, token.String)
 
-        e(t.__class__, token._TokenType)
+    assert token.string_to_tokentype(token.String) is token.String
+    assert token.string_to_tokentype('') is token.Token
+    assert token.string_to_tokentype('String') is token.String
 
-    def test_functions(self):
-        self.assertTrue(token.is_token_subtype(token.String, token.String))
-        self.assertTrue(token.is_token_subtype(token.String, token.Literal))
-        self.assertFalse(token.is_token_subtype(token.Literal, token.String))
 
-        self.assertTrue(token.string_to_tokentype(token.String) is token.String)
-        self.assertTrue(token.string_to_tokentype('') is token.Token)
-        self.assertTrue(token.string_to_tokentype('String') is token.String)
+def test_sanity_check():
+    stp = token.STANDARD_TYPES.copy()
+    stp[token.Token] = '---'  # Token and Text do conflict, that is okay
+    t = {}
+    for k, v in stp.items():
+        t.setdefault(v, []).append(k)
+    if len(t) == len(stp):
+        return  # Okay
 
-    def test_sanity_check(self):
-        stp = token.STANDARD_TYPES.copy()
-        stp[token.Token] = '---' # Token and Text do conflict, that is okay
-        t = {}
-        for k, v in stp.items():
-            t.setdefault(v, []).append(k)
-        if len(t) == len(stp):
-            return # Okay
+    for k, v in t.items():
+        if len(v) > 1:
+            pytest.fail("%r has more than one key: %r" % (k, v))
 
-        for k, v in t.items():
-            if len(v) > 1:
-                self.fail("%r has more than one key: %r" % (k, v))
 
-    def test_copying(self):
-        # Token instances are supposed to be singletons, so copying or even
-        # deepcopying should return themselves
-        t = token.String
-        self.assertIs(t, copy.copy(t))
-        self.assertIs(t, copy.deepcopy(t))
+def test_copying():
+    # Token instances are supposed to be singletons, so copying or even
+    # deepcopying should return themselves
+    t = token.String
+    assert t is copy.copy(t)
+    assert t is copy.deepcopy(t)
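Token types are interned singletons (the property test_copying relies on), so
identity comparisons are safe however a type is obtained. A short sketch:

    from pygments.token import String, Token, string_to_tokentype

    # Attribute access, string lookup and split() all return the same object.
    assert String is Token.Literal.String
    assert string_to_tokentype('Literal.String') is String
    assert String.split()[-1] is String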
diff --git a/tests/test_unistring.py b/tests/test_unistring.py
index 82d74ed..a4b5882 100644
--- a/tests/test_unistring.py
+++ b/tests/test_unistring.py
@@ -8,41 +8,40 @@
 """
 
 import re
-import unittest
 import random
 
 from pygments import unistring as uni
 from pygments.util import unichr
 
 
-class UnistringTest(unittest.TestCase):
-    def test_cats_exist_and_compilable(self):
-        for cat in uni.cats:
-            s = getattr(uni, cat)
-            if s == '':  # Probably Cs on Jython
-                continue
-            print("%s %r" % (cat, s))
-            re.compile('[%s]' % s)
-
-    def _cats_that_match(self, c):
-        matching_cats = []
-        for cat in uni.cats:
-            s = getattr(uni, cat)
-            if s == '':  # Probably Cs on Jython
-                continue
-            if re.compile('[%s]' % s).match(c):
-                matching_cats.append(cat)
-        return matching_cats
-
-    def test_spot_check_types(self):
-        # Each char should match one, and precisely one, category
-        random.seed(0)
-        for i in range(1000):
-            o = random.randint(0, 65535)
-            c = unichr(o)
-            if o > 0xd800 and o <= 0xdfff and not uni.Cs:
-                continue  # Bah, Jython.
-            print(hex(o))
-            cats = self._cats_that_match(c)
-            self.assertEqual(len(cats), 1,
-                             "%d (%s): %s" % (o, c, cats))
+def test_cats_exist_and_compilable():
+    for cat in uni.cats:
+        s = getattr(uni, cat)
+        if s == '':  # Probably Cs on Jython
+            continue
+        print("%s %r" % (cat, s))
+        re.compile('[%s]' % s)
+
+
+def _cats_that_match(c):
+    matching_cats = []
+    for cat in uni.cats:
+        s = getattr(uni, cat)
+        if s == '':  # Probably Cs on Jython
+            continue
+        if re.compile('[%s]' % s).match(c):
+            matching_cats.append(cat)
+    return matching_cats
+
+
+def test_spot_check_types():
+    # Each char should match one, and precisely one, category
+    random.seed(0)
+    for i in range(1000):
+        o = random.randint(0, 65535)
+        c = unichr(o)
+        if o > 0xd800 and o <= 0xdfff and not uni.Cs:
+            continue  # Bah, Jython.
+        print(hex(o))
+        cats = _cats_that_match(c)
+        assert len(cats) == 1, "%d (%s): %s" % (o, c, cats)
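pygments.unistring exposes each Unicode category as a ready-made character
class body, which is exactly what the re.compile('[%s]' % s) calls above
exercise. A minimal sketch:

    import re
    from pygments import unistring as uni

    uppercase = re.compile(u'[%s]' % uni.Lu)  # Lu: uppercase letters
    assert uppercase.match(u'Ä')
    assert not uppercase.match(u'a')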
diff --git a/tests/test_using_api.py b/tests/test_using_api.py
index 2ab70d0..b5310aa 100644
--- a/tests/test_using_api.py
+++ b/tests/test_using_api.py
@@ -7,12 +7,13 @@
     :license: BSD, see LICENSE for details.
 """
 
-import unittest
+from pytest import raises
 
 from pygments.lexer import using, bygroups, this, RegexLexer
 from pygments.token import String, Text, Keyword
 
-class TestLexer(RegexLexer):
+
+class MyLexer(RegexLexer):
     tokens = {
         'root': [
             (r'#.*',
@@ -27,14 +28,13 @@ class TestLexer(RegexLexer):
     }
 
 
-class UsingStateTest(unittest.TestCase):
-    def test_basic(self):
-        expected = [(Text, 'a'), (String, '"'), (Keyword, 'bcd'),
-                    (String, '"'), (Text, 'e\n')]
-        t = list(TestLexer().get_tokens('a"bcd"e'))
-        self.assertEqual(t, expected)
+def test_basic():
+    expected = [(Text, 'a'), (String, '"'), (Keyword, 'bcd'),
+                (String, '"'), (Text, 'e\n')]
+    assert list(MyLexer().get_tokens('a"bcd"e')) == expected
+
 
-    def test_error(self):
-        def gen():
-            return list(TestLexer().get_tokens('#a'))
-        self.assertRaises(KeyError, gen)
+def test_error():
+    def gen():
+        return list(MyLexer().get_tokens('#a'))
+    assert raises(KeyError, gen)
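raises(KeyError, gen) calls gen() and returns a truthy ExceptionInfo, so the
bare assert passes. The equivalent context-manager form reads more naturally;
a sketch using the MyLexer defined above:

    import pytest

    def test_error():
        with pytest.raises(KeyError):
            list(MyLexer().get_tokens('#a'))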
diff --git a/tests/test_util.py b/tests/test_util.py
index 646a403..aa7b7ac 100644
--- a/tests/test_util.py
+++ b/tests/test_util.py
@@ -8,7 +8,8 @@
 """
 
 import re
-import unittest
+
+from pytest import raises
 
 from pygments import util, console
 
@@ -19,195 +20,201 @@ class FakeLexer(object):
     analyse = util.make_analysator(analyse)
 
 
-class UtilTest(unittest.TestCase):
-
-    def test_getoptions(self):
-        raises = self.assertRaises
-        equals = self.assertEqual
-
-        equals(util.get_bool_opt({}, 'a', True), True)
-        equals(util.get_bool_opt({}, 'a', 1), True)
-        equals(util.get_bool_opt({}, 'a', 'true'), True)
-        equals(util.get_bool_opt({}, 'a', 'no'), False)
-        raises(util.OptionError, util.get_bool_opt, {}, 'a', [])
-        raises(util.OptionError, util.get_bool_opt, {}, 'a', 'foo')
-
-        equals(util.get_int_opt({}, 'a', 1), 1)
-        raises(util.OptionError, util.get_int_opt, {}, 'a', [])
-        raises(util.OptionError, util.get_int_opt, {}, 'a', 'bar')
-
-        equals(util.get_list_opt({}, 'a', [1]), [1])
-        equals(util.get_list_opt({}, 'a', '1 2'), ['1', '2'])
-        raises(util.OptionError, util.get_list_opt, {}, 'a', 1)
-
-        equals(util.get_choice_opt({}, 'a', ['foo', 'bar'], 'bar'), 'bar')
-        equals(util.get_choice_opt({}, 'a', ['foo', 'bar'], 'Bar', True), 'bar')
-        raises(util.OptionError, util.get_choice_opt, {}, 'a',
-               ['foo', 'bar'], 'baz')
-
-    def test_docstring_headline(self):
-        def f1():
-            """
-            docstring headline
-
-            other text
-            """
-        def f2():
-            """
-            docstring
-            headline
-
-            other text
-            """
-        def f3():
-            pass
-
-        self.assertEqual(util.docstring_headline(f1), 'docstring headline')
-        self.assertEqual(util.docstring_headline(f2), 'docstring headline')
-        self.assertEqual(util.docstring_headline(f3), '')
-
-    def test_analysator_returns_float(self):
-        # If an analysator wrapped by make_analysator returns a floating point
-        # number, then that number will be returned by the wrapper.
-        self.assertEqual(FakeLexer.analyse('0.5'), 0.5)
-
-    def test_analysator_returns_boolean(self):
-        # If an analysator wrapped by make_analysator returns a boolean value,
-        # then the wrapper will return 1.0 if the boolean was True or 0.0 if
-        # it was False.
-        self.assertEqual(FakeLexer.analyse(True), 1.0)
-        self.assertEqual(FakeLexer.analyse(False), 0.0)
-
-    def test_analysator_raises_exception(self):
-        # If an analysator wrapped by make_analysator raises an exception,
-        # then the wrapper will return 0.0.
-        class ErrorLexer(object):
-            def analyse(text):
-                raise RuntimeError('something bad happened')
-            analyse = util.make_analysator(analyse)
-        self.assertEqual(ErrorLexer.analyse(''), 0.0)
-
-    def test_analysator_value_error(self):
-        # When converting the analysator's return value to a float a
-        # ValueError may occur.  If that happens 0.0 is returned instead.
-        self.assertEqual(FakeLexer.analyse('bad input'), 0.0)
-
-    def test_analysator_type_error(self):
-        # When converting the analysator's return value to a float a
-        # TypeError may occur.  If that happens 0.0 is returned instead.
-        self.assertEqual(FakeLexer.analyse('xxx'), 0.0)
-
-    def test_shebang_matches(self):
-        self.assertTrue(util.shebang_matches('#!/usr/bin/env python\n', r'python(2\.\d)?'))
-        self.assertTrue(util.shebang_matches('#!/usr/bin/python2.4', r'python(2\.\d)?'))
-        self.assertTrue(util.shebang_matches('#!/usr/bin/startsomethingwith python',
-                                             r'python(2\.\d)?'))
-        self.assertTrue(util.shebang_matches('#!C:\\Python2.4\\Python.exe',
-                                             r'python(2\.\d)?'))
-
-        self.assertFalse(util.shebang_matches('#!/usr/bin/python-ruby',
-                                              r'python(2\.\d)?'))
-        self.assertFalse(util.shebang_matches('#!/usr/bin/python/ruby',
-                                              r'python(2\.\d)?'))
-        self.assertFalse(util.shebang_matches('#!', r'python'))
-
-    def test_doctype_matches(self):
-        self.assertTrue(util.doctype_matches(
-            '<!DOCTYPE html> <html>', 'html.*'))
-        self.assertFalse(util.doctype_matches(
-            '<?xml ?> <DOCTYPE html PUBLIC "a"> <html>', 'html.*'))
-        self.assertTrue(util.html_doctype_matches(
-            '<?xml ?><!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN">'))
-
-    def test_xml(self):
-        self.assertTrue(util.looks_like_xml(
-            '<?xml ?><!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN">'))
-        self.assertTrue(util.looks_like_xml('<html xmlns>abc</html>'))
-        self.assertFalse(util.looks_like_xml('<html>'))
-
-    def test_unirange(self):
-        first_non_bmp = u'\U00010000'
-        r = re.compile(util.unirange(0x10000, 0x20000))
-        m = r.match(first_non_bmp)
-        self.assertTrue(m)
-        self.assertEqual(m.end(), len(first_non_bmp))
-        self.assertFalse(r.match(u'\uffff'))
-        self.assertFalse(r.match(u'xxx'))
-        # Tests that end is inclusive
-        r = re.compile(util.unirange(0x10000, 0x10000) + '+')
-        # Tests that the plus works for the entire unicode point, if narrow
-        # build
-        m = r.match(first_non_bmp * 2)
-        self.assertTrue(m)
-        self.assertEqual(m.end(), len(first_non_bmp) * 2)
-
-    def test_format_lines(self):
-        lst = ['cat', 'dog']
-        output = util.format_lines('var', lst)
-        d = {}
-        exec(output, d)
-        self.assertTrue(isinstance(d['var'], tuple))
-        self.assertEqual(('cat', 'dog'), d['var'])
-
-    def test_duplicates_removed_seq_types(self):
-        # tuple
-        x = util.duplicates_removed(('a', 'a', 'b'))
-        self.assertEqual(['a', 'b'], x)
-        # list
-        x = util.duplicates_removed(['a', 'a', 'b'])
-        self.assertEqual(['a', 'b'], x)
-        # iterator
-        x = util.duplicates_removed(iter(('a', 'a', 'b')))
-        self.assertEqual(['a', 'b'], x)
-
-    def test_duplicates_removed_nonconsecutive(self):
-        # keeps first
-        x = util.duplicates_removed(('a', 'b', 'a'))
-        self.assertEqual(['a', 'b'], x)
-
-    def test_guess_decode(self):
-        # UTF-8 should be decoded as UTF-8
-        s = util.guess_decode(u'\xff'.encode('utf-8'))
-        self.assertEqual(s, (u'\xff', 'utf-8'))
-
-        # otherwise, it could be latin1 or the locale encoding...
-        import locale
-        s = util.guess_decode(b'\xff')
-        self.assertTrue(s[1] in ('latin1', locale.getpreferredencoding()))
-
-    def test_guess_decode_from_terminal(self):
-        class Term:
-            encoding = 'utf-7'
-
-        s = util.guess_decode_from_terminal(u'\xff'.encode('utf-7'), Term)
-        self.assertEqual(s, (u'\xff', 'utf-7'))
-
-        s = util.guess_decode_from_terminal(u'\xff'.encode('utf-8'), Term)
-        self.assertEqual(s, (u'\xff', 'utf-8'))
-
-    def test_add_metaclass(self):
-        class Meta(type):
-            pass
-
-        @util.add_metaclass(Meta)
-        class Cls:
-            pass
-
-        self.assertEqual(type(Cls), Meta)
-
-
-class ConsoleTest(unittest.TestCase):
-
-    def test_ansiformat(self):
-        f = console.ansiformat
-        c = console.codes
-        all_attrs = f('+*_blue_*+', 'text')
-        self.assertTrue(c['blue'] in all_attrs and c['blink'] in all_attrs
-                        and c['bold'] in all_attrs and c['underline'] in all_attrs
-                        and c['reset'] in all_attrs)
-        self.assertRaises(KeyError, f, '*mauve*', 'text')
-
-    def test_functions(self):
-        self.assertEqual(console.reset_color(), console.codes['reset'])
-        self.assertEqual(console.colorize('blue', 'text'),
-                         console.codes['blue'] + 'text' + console.codes['reset'])
+def test_getoptions():
+    assert util.get_bool_opt({}, 'a', True) is True
+    assert util.get_bool_opt({}, 'a', 1) is True
+    assert util.get_bool_opt({}, 'a', 'true') is True
+    assert util.get_bool_opt({}, 'a', 'no') is False
+    assert raises(util.OptionError, util.get_bool_opt, {}, 'a', [])
+    assert raises(util.OptionError, util.get_bool_opt, {}, 'a', 'foo')
+
+    assert util.get_int_opt({}, 'a', 1) == 1
+    assert raises(util.OptionError, util.get_int_opt, {}, 'a', [])
+    assert raises(util.OptionError, util.get_int_opt, {}, 'a', 'bar')
+
+    assert util.get_list_opt({}, 'a', [1]) == [1]
+    assert util.get_list_opt({}, 'a', '1 2') == ['1', '2']
+    assert raises(util.OptionError, util.get_list_opt, {}, 'a', 1)
+
+    assert util.get_choice_opt({}, 'a', ['foo', 'bar'], 'bar') == 'bar'
+    assert util.get_choice_opt({}, 'a', ['foo', 'bar'], 'Bar', True) == 'bar'
+    assert raises(util.OptionError, util.get_choice_opt, {}, 'a',
+                  ['foo', 'bar'], 'baz')
+
+
+def test_docstring_headline():
+    def f1():
+        """
+        docstring headline
+
+        other text
+        """
+    def f2():
+        """
+        docstring
+        headline
+
+        other text
+        """
+    def f3():
+        pass
+
+    assert util.docstring_headline(f1) == 'docstring headline'
+    assert util.docstring_headline(f2) == 'docstring headline'
+    assert util.docstring_headline(f3) == ''
+
+
+def test_analysator_returns_float():
+    # If an analysator wrapped by make_analysator returns a floating point
+    # number, then that number will be returned by the wrapper.
+    assert FakeLexer.analyse('0.5') == 0.5
+
+
+def test_analysator_returns_boolean():
+    # If an analysator wrapped by make_analysator returns a boolean value,
+    # then the wrapper will return 1.0 if the boolean was True or 0.0 if
+    # it was False.
+    assert FakeLexer.analyse(True) == 1.0
+    assert FakeLexer.analyse(False) == 0.0
+
+
+def test_analysator_raises_exception():
+    # If an analysator wrapped by make_analysator raises an exception,
+    # then the wrapper will return 0.0.
+    class ErrorLexer(object):
+        def analyse(text):
+            raise RuntimeError('something bad happened')
+        analyse = util.make_analysator(analyse)
+    assert ErrorLexer.analyse('') == 0.0
+
+
+def test_analysator_value_error():
+    # When converting the analysator's return value to a float a
+    # ValueError may occur.  If that happens 0.0 is returned instead.
+    assert FakeLexer.analyse('bad input') == 0.0
+
+
+def test_analysator_type_error():
+    # When converting the analysator's return value to a float a
+    # TypeError may occur.  If that happens 0.0 is returned instead.
+    assert FakeLexer.analyse('xxx') == 0.0
+
+
+def test_shebang_matches():
+    assert util.shebang_matches('#!/usr/bin/env python\n', r'python(2\.\d)?')
+    assert util.shebang_matches('#!/usr/bin/python2.4', r'python(2\.\d)?')
+    assert util.shebang_matches('#!/usr/bin/startsomethingwith python',
+                                r'python(2\.\d)?')
+    assert util.shebang_matches('#!C:\\Python2.4\\Python.exe', r'python(2\.\d)?')
+
+    assert not util.shebang_matches('#!/usr/bin/python-ruby', r'python(2\.\d)?')
+    assert not util.shebang_matches('#!/usr/bin/python/ruby', r'python(2\.\d)?')
+    assert not util.shebang_matches('#!', r'python')
+
+
+def test_doctype_matches():
+    assert util.doctype_matches('<!DOCTYPE html> <html>', 'html.*')
+    assert not util.doctype_matches(
+        '<?xml ?> <DOCTYPE html PUBLIC "a"> <html>', 'html.*')
+    assert util.html_doctype_matches(
+        '<?xml ?><!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN">')
+
+
+def test_xml():
+    assert util.looks_like_xml(
+        '<?xml ?><!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN">')
+    assert util.looks_like_xml('<html xmlns>abc</html>')
+    assert not util.looks_like_xml('<html>')
+
+
+def test_unirange():
+    first_non_bmp = u'\U00010000'
+    r = re.compile(util.unirange(0x10000, 0x20000))
+    m = r.match(first_non_bmp)
+    assert m
+    assert m.end() == len(first_non_bmp)
+    assert not r.match(u'\uffff')
+    assert not r.match(u'xxx')
+    # Tests that end is inclusive
+    r = re.compile(util.unirange(0x10000, 0x10000) + '+')
+    # Tests that the plus works for the entire unicode point, if narrow
+    # build
+    m = r.match(first_non_bmp * 2)
+    assert m
+    assert m.end() == len(first_non_bmp) * 2
+
+
+def test_format_lines():
+    lst = ['cat', 'dog']
+    output = util.format_lines('var', lst)
+    d = {}
+    exec(output, d)
+    assert isinstance(d['var'], tuple)
+    assert ('cat', 'dog') == d['var']
+
+
+def test_duplicates_removed_seq_types():
+    # tuple
+    x = util.duplicates_removed(('a', 'a', 'b'))
+    assert ['a', 'b'] == x
+    # list
+    x = util.duplicates_removed(['a', 'a', 'b'])
+    assert ['a', 'b'] == x
+    # iterator
+    x = util.duplicates_removed(iter(('a', 'a', 'b')))
+    assert ['a', 'b'] == x
+
+
+def test_duplicates_removed_nonconsecutive():
+    # keeps first
+    x = util.duplicates_removed(('a', 'b', 'a'))
+    assert ['a', 'b'] == x
+
+
+def test_guess_decode():
+    # UTF-8 should be decoded as UTF-8
+    s = util.guess_decode(u'\xff'.encode('utf-8'))
+    assert s == (u'\xff', 'utf-8')
+
+    # otherwise, it could be latin1 or the locale encoding...
+    import locale
+    s = util.guess_decode(b'\xff')
+    assert s[1] in ('latin1', locale.getpreferredencoding())
+
+
+def test_guess_decode_from_terminal():
+    class Term:
+        encoding = 'utf-7'
+
+    s = util.guess_decode_from_terminal(u'\xff'.encode('utf-7'), Term)
+    assert s == (u'\xff', 'utf-7')
+
+    s = util.guess_decode_from_terminal(u'\xff'.encode('utf-8'), Term)
+    assert s == (u'\xff', 'utf-8')
+
+
+def test_add_metaclass():
+    class Meta(type):
+        pass
+
+    @util.add_metaclass(Meta)
+    class Cls:
+        pass
+
+    assert type(Cls) is Meta
+
+
+def test_console_ansiformat():
+    f = console.ansiformat
+    c = console.codes
+    all_attrs = f('+*_blue_*+', 'text')
+    assert c['blue'] in all_attrs and c['blink'] in all_attrs
+    assert c['bold'] in all_attrs and c['underline'] in all_attrs
+    assert c['reset'] in all_attrs
+    assert raises(KeyError, f, '*mauve*', 'text')
+
+
+def test_console_functions():
+    assert console.reset_color() == console.codes['reset']
+    assert console.colorize('blue', 'text') == \
+        console.codes['blue'] + 'text' + console.codes['reset']
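The get_*_opt helpers tested above are what lexers and formatters use to
coerce the string-valued options users pass in. A minimal usage sketch:

    from pygments import util

    options = {'stripnl': 'no', 'tabsize': '8'}
    # String values are coerced: 'no'/'false'/'off'/'0' mean False,
    # integer options are parsed with int().
    assert util.get_bool_opt(options, 'stripnl', True) is False
    assert util.get_int_opt(options, 'tabsize', 4) == 8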
diff --git a/tests/test_whiley.py b/tests/test_whiley.py
index f447ffe..84fef25 100644
--- a/tests/test_whiley.py
+++ b/tests/test_whiley.py
@@ -3,28 +3,29 @@
     Whiley Test
     ~~~~~~~~~~~
 
-    :copyright: Copyright 2006-2016 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
-import unittest
+import pytest
 
 from pygments.lexers import WhileyLexer
 from pygments.token import Token
 
 
-class WhileyTest(unittest.TestCase):
-    def setUp(self):
-        self.lexer = WhileyLexer()
+@pytest.fixture(scope='module')
+def lexer():
+    yield WhileyLexer()
 
-    def testWhileyOperator(self):
-        fragment = u'123 \u2200 x\n'
-        tokens = [
-            (Token.Literal.Number.Integer, u'123'),
-            (Token.Text, u' '),
-            (Token.Operator, u'\u2200'),
-            (Token.Text, u' '),
-            (Token.Name, u'x'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+
+def test_whiley_operator(lexer):
+    fragment = u'123 \u2200 x\n'
+    tokens = [
+        (Token.Literal.Number.Integer, u'123'),
+        (Token.Text, u' '),
+        (Token.Operator, u'\u2200'),
+        (Token.Text, u' '),
+        (Token.Name, u'x'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
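Note that get_tokens() always ends on a newline: the lexer's default
ensurenl=True option appends one when the input lacks it, which is why
expected token lists like the one above can count on a final newline token.
A sketch:

    from pygments.lexers import WhileyLexer

    tokens = list(WhileyLexer().get_tokens(u'123'))  # no trailing newline given
    assert tokens[-1][1].endswith(u'\n')             # one is appended anyway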
diff --git a/tox.ini b/tox.ini
deleted file mode 100644
index 2c63c29..0000000
--- a/tox.ini
+++ /dev/null
@@ -1,7 +0,0 @@
-[tox]
-envlist = py27, py35, py36, py37
-[testenv]
-deps =
-    nose
-    coverage
-commands = python -d tests/run.py {posargs}
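The nose-based tox setup is removed outright rather than ported to pytest.
Purely as a hypothetical sketch (not part of this release), a pytest
equivalent of the deleted file could look like:

    [tox]
    envlist = py27, py35, py36, py37

    [testenv]
    # hypothetical replacement; 2.5.x actually drops tox.ini entirely
    deps = pytest
    commands = py.test {posargs}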