From: JinWang An Date: Tue, 5 Jan 2021 03:22:59 +0000 (+0900) Subject: Imported Upstream version 2.7.0 X-Git-Tag: upstream/2.7.0^0 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=177243f10e01371109e766b3be2d965ac8fd1fa2;p=platform%2Fupstream%2Fpython3-pygments.git Imported Upstream version 2.7.0 --- diff --git a/AUTHORS b/AUTHORS old mode 100644 new mode 100755 index 3dc2234..05d5d70 --- a/AUTHORS +++ b/AUTHORS @@ -1,231 +1,237 @@ -Pygments is written and maintained by Georg Brandl . - -Major developers are Tim Hatch and Armin Ronacher -. - -Other contributors, listed alphabetically, are: - -* Sam Aaron -- Ioke lexer -* Ali Afshar -- image formatter -* Thomas Aglassinger -- Easytrieve, JCL, Rexx, Transact-SQL and VBScript - lexers -* Muthiah Annamalai -- Ezhil lexer -* Kumar Appaiah -- Debian control lexer -* Andreas Amann -- AppleScript lexer -* Timothy Armstrong -- Dart lexer fixes -* Jeffrey Arnold -- R/S, Rd, BUGS, Jags, and Stan lexers -* Jeremy Ashkenas -- CoffeeScript lexer -* José Joaquín Atria -- Praat lexer -* Stefan Matthias Aust -- Smalltalk lexer -* Lucas Bajolet -- Nit lexer -* Ben Bangert -- Mako lexers -* Max Battcher -- Darcs patch lexer -* Thomas Baruchel -- APL lexer -* Tim Baumann -- (Literate) Agda lexer -* Paul Baumgart, 280 North, Inc. -- Objective-J lexer -* Michael Bayer -- Myghty lexers -* Thomas Beale -- Archetype lexers -* John Benediktsson -- Factor lexer -* Trevor Bergeron -- mIRC formatter -* Vincent Bernat -- LessCSS lexer -* Christopher Bertels -- Fancy lexer -* Sébastien Bigaret -- QVT Operational lexer -* Jarrett Billingsley -- MiniD lexer -* Adam Blinkinsop -- Haskell, Redcode lexers -* Stéphane Blondon -- SGF and Sieve lexers -* Frits van Bommel -- assembler lexers -* Pierre Bourdon -- bugfixes -* Martijn Braam -- Kernel log lexer -* Matthias Bussonnier -- ANSI style handling for terminal-256 formatter -* chebee7i -- Python traceback lexer improvements -* Hiram Chirino -- Scaml and Jade lexers -* Mauricio Caceres -- SAS and Stata lexers. -* Ian Cooper -- VGL lexer -* David Corbett -- Inform, Jasmin, JSGF, Snowball, and TADS 3 lexers -* Leaf Corcoran -- MoonScript lexer -* Christopher Creutzig -- MuPAD lexer -* Daniël W. Crompton -- Pike lexer -* Pete Curry -- bugfixes -* Bryan Davis -- EBNF lexer -* Bruno Deferrari -- Shen lexer -* Giedrius Dubinskas -- HTML formatter improvements -* Owen Durni -- Haxe lexer -* Alexander Dutton, Oxford University Computing Services -- SPARQL lexer -* James Edwards -- Terraform lexer -* Nick Efford -- Python 3 lexer -* Sven Efftinge -- Xtend lexer -* Artem Egorkine -- terminal256 formatter -* Matthew Fernandez -- CAmkES lexer -* Michael Ficarra -- CPSA lexer -* James H. Fisher -- PostScript lexer -* William S. 
Fulton -- SWIG lexer -* Carlos Galdino -- Elixir and Elixir Console lexers -* Michael Galloy -- IDL lexer -* Naveen Garg -- Autohotkey lexer -* Simon Garnotel -- FreeFem++ lexer -* Laurent Gautier -- R/S lexer -* Alex Gaynor -- PyPy log lexer -* Richard Gerkin -- Igor Pro lexer -* Alain Gilbert -- TypeScript lexer -* Alex Gilding -- BlitzBasic lexer -* GitHub, Inc -- DASM16, Augeas, TOML, and Slash lexers -* Bertrand Goetzmann -- Groovy lexer -* Krzysiek Goj -- Scala lexer -* Rostyslav Golda -- FloScript lexer -* Andrey Golovizin -- BibTeX lexers -* Matt Good -- Genshi, Cheetah lexers -* Michał Górny -- vim modeline support -* Alex Gosse -- TrafficScript lexer -* Patrick Gotthardt -- PHP namespaces support -* Olivier Guibe -- Asymptote lexer -* Phil Hagelberg -- Fennel lexer -* Florian Hahn -- Boogie lexer -* Martin Harriman -- SNOBOL lexer -* Matthew Harrison -- SVG formatter -* Steven Hazel -- Tcl lexer -* Dan Michael Heggø -- Turtle lexer -* Aslak Hellesøy -- Gherkin lexer -* Greg Hendershott -- Racket lexer -* Justin Hendrick -- ParaSail lexer -* Jordi Gutiérrez Hermoso -- Octave lexer -* David Hess, Fish Software, Inc. -- Objective-J lexer -* Varun Hiremath -- Debian control lexer -* Rob Hoelz -- Perl 6 lexer -* Doug Hogan -- Mscgen lexer -* Ben Hollis -- Mason lexer -* Max Horn -- GAP lexer -* Alastair Houghton -- Lexer inheritance facility -* Tim Howard -- BlitzMax lexer -* Dustin Howett -- Logos lexer -* Ivan Inozemtsev -- Fantom lexer -* Hiroaki Itoh -- Shell console rewrite, Lexers for PowerShell session, - MSDOS session, BC, WDiff -* Brian R. Jackson -- Tea lexer -* Christian Jann -- ShellSession lexer -* Dennis Kaarsemaker -- sources.list lexer -* Dmitri Kabak -- Inferno Limbo lexer -* Igor Kalnitsky -- vhdl lexer -* Colin Kennedy - USD lexer -* Alexander Kit -- MaskJS lexer -* Pekka Klärck -- Robot Framework lexer -* Gerwin Klein -- Isabelle lexer -* Eric Knibbe -- Lasso lexer -* Stepan Koltsov -- Clay lexer -* Adam Koprowski -- Opa lexer -* Benjamin Kowarsch -- Modula-2 lexer -* Domen Kožar -- Nix lexer -* Oleh Krekel -- Emacs Lisp lexer -* Alexander Kriegisch -- Kconfig and AspectJ lexers -* Marek Kubica -- Scheme lexer -* Jochen Kupperschmidt -- Markdown processor -* Gerd Kurzbach -- Modelica lexer -* Jon Larimer, Google Inc. -- Smali lexer -* Olov Lassus -- Dart lexer -* Matt Layman -- TAP lexer -* Kristian Lyngstøl -- Varnish lexers -* Sylvestre Ledru -- Scilab lexer -* Chee Sing Lee -- Flatline lexer -* Mark Lee -- Vala lexer -* Valentin Lorentz -- C++ lexer improvements -* Ben Mabey -- Gherkin lexer -* Angus MacArthur -- QML lexer -* Louis Mandel -- X10 lexer -* Louis Marchand -- Eiffel lexer -* Simone Margaritelli -- Hybris lexer -* Kirk McDonald -- D lexer -* Gordon McGregor -- SystemVerilog lexer -* Stephen McKamey -- Duel/JBST lexer -* Brian McKenna -- F# lexer -* Charles McLaughlin -- Puppet lexer -* Kurt McKee -- Tera Term macro lexer -* Lukas Meuser -- BBCode formatter, Lua lexer -* Cat Miller -- Pig lexer -* Paul Miller -- LiveScript lexer -* Hong Minhee -- HTTP lexer -* Michael Mior -- Awk lexer -* Bruce Mitchener -- Dylan lexer rewrite -* Reuben Morais -- SourcePawn lexer -* Jon Morton -- Rust lexer -* Paulo Moura -- Logtalk lexer -* Mher Movsisyan -- DTD lexer -* Dejan Muhamedagic -- Crmsh lexer -* Ana Nelson -- Ragel, ANTLR, R console lexers -* Kurt Neufeld -- Markdown lexer -* Nam T. 
Nguyen -- Monokai style -* Jesper Noehr -- HTML formatter "anchorlinenos" -* Mike Nolta -- Julia lexer -* Jonas Obrist -- BBCode lexer -* Edward O'Callaghan -- Cryptol lexer -* David Oliva -- Rebol lexer -* Pat Pannuto -- nesC lexer -* Jon Parise -- Protocol buffers and Thrift lexers -* Benjamin Peterson -- Test suite refactoring -* Ronny Pfannschmidt -- BBCode lexer -* Dominik Picheta -- Nimrod lexer -* Andrew Pinkham -- RTF Formatter Refactoring -* Clément Prévost -- UrbiScript lexer -* Tanner Prynn -- cmdline -x option and loading lexers from files -* Oleh Prypin -- Crystal lexer (based on Ruby lexer) -* Xidorn Quan -- Web IDL lexer -* Elias Rabel -- Fortran fixed form lexer -* raichoo -- Idris lexer -* Kashif Rasul -- CUDA lexer -* Nathan Reed -- HLSL lexer -* Justin Reidy -- MXML lexer -* Norman Richards -- JSON lexer -* Corey Richardson -- Rust lexer updates -* Lubomir Rintel -- GoodData MAQL and CL lexers -* Andre Roberge -- Tango style -* Georg Rollinger -- HSAIL lexer -* Michiel Roos -- TypoScript lexer -* Konrad Rudolph -- LaTeX formatter enhancements -* Mario Ruggier -- Evoque lexers -* Miikka Salminen -- Lovelace style, Hexdump lexer, lexer enhancements -* Stou Sandalski -- NumPy, FORTRAN, tcsh and XSLT lexers -* Matteo Sasso -- Common Lisp lexer -* Joe Schafer -- Ada lexer -* Ken Schutte -- Matlab lexers -* René Schwaiger -- Rainbow Dash style -* Sebastian Schweizer -- Whiley lexer -* Tassilo Schweyer -- Io, MOOCode lexers -* Ted Shaw -- AutoIt lexer -* Joerg Sieker -- ABAP lexer -* Robert Simmons -- Standard ML lexer -* Kirill Simonov -- YAML lexer -* Corbin Simpson -- Monte lexer -* Alexander Smishlajev -- Visual FoxPro lexer -* Steve Spigarelli -- XQuery lexer -* Jerome St-Louis -- eC lexer -* Camil Staps -- Clean and NuSMV lexers; Solarized style -* James Strachan -- Kotlin lexer -* Tom Stuart -- Treetop lexer -* Colin Sullivan -- SuperCollider lexer -* Ben Swift -- Extempore lexer -* Edoardo Tenani -- Arduino lexer -* Tiberius Teng -- default style overhaul -* Jeremy Thurgood -- Erlang, Squid config lexers -* Brian Tiffin -- OpenCOBOL lexer -* Bob Tolbert -- Hy lexer -* Matthias Trute -- Forth lexer -* Erick Tryzelaar -- Felix lexer -* Alexander Udalov -- Kotlin lexer improvements -* Thomas Van Doren -- Chapel lexer -* Daniele Varrazzo -- PostgreSQL lexers -* Abe Voelker -- OpenEdge ABL lexer -* Pepijn de Vos -- HTML formatter CTags support -* Matthias Vallentin -- Bro lexer -* Benoît Vinot -- AMPL lexer -* Linh Vu Hong -- RSL lexer -* Nathan Weizenbaum -- Haml and Sass lexers -* Nathan Whetsell -- Csound lexers -* Dietmar Winkler -- Modelica lexer -* Nils Winter -- Smalltalk lexer -* Davy Wybiral -- Clojure lexer -* Whitney Young -- ObjectiveC lexer -* Diego Zamboni -- CFengine3 lexer -* Enrique Zamudio -- Ceylon lexer -* Alex Zimin -- Nemerle lexer -* Rob Zimmerman -- Kal lexer -* Vincent Zurczak -- Roboconf lexer - -Many thanks for all contributions! +Pygments is written and maintained by Georg Brandl . + +Major developers are Tim Hatch and Armin Ronacher +. 
+ +Other contributors, listed alphabetically, are: + +* Sam Aaron -- Ioke lexer +* Ali Afshar -- image formatter +* Thomas Aglassinger -- Easytrieve, JCL, Rexx, Transact-SQL and VBScript + lexers +* Muthiah Annamalai -- Ezhil lexer +* Kumar Appaiah -- Debian control lexer +* Andreas Amann -- AppleScript lexer +* Timothy Armstrong -- Dart lexer fixes +* Jeffrey Arnold -- R/S, Rd, BUGS, Jags, and Stan lexers +* Jeremy Ashkenas -- CoffeeScript lexer +* José Joaquín Atria -- Praat lexer +* Stefan Matthias Aust -- Smalltalk lexer +* Lucas Bajolet -- Nit lexer +* Ben Bangert -- Mako lexers +* Max Battcher -- Darcs patch lexer +* Thomas Baruchel -- APL lexer +* Tim Baumann -- (Literate) Agda lexer +* Paul Baumgart, 280 North, Inc. -- Objective-J lexer +* Michael Bayer -- Myghty lexers +* Thomas Beale -- Archetype lexers +* John Benediktsson -- Factor lexer +* Trevor Bergeron -- mIRC formatter +* Vincent Bernat -- LessCSS lexer +* Christopher Bertels -- Fancy lexer +* Sébastien Bigaret -- QVT Operational lexer +* Jarrett Billingsley -- MiniD lexer +* Adam Blinkinsop -- Haskell, Redcode lexers +* Stéphane Blondon -- SGF and Sieve lexers +* Frits van Bommel -- assembler lexers +* Pierre Bourdon -- bugfixes +* Martijn Braam -- Kernel log lexer, BARE lexer +* Matthias Bussonnier -- ANSI style handling for terminal-256 formatter +* chebee7i -- Python traceback lexer improvements +* Hiram Chirino -- Scaml and Jade lexers +* Mauricio Caceres -- SAS and Stata lexers. +* Ian Cooper -- VGL lexer +* David Corbett -- Inform, Jasmin, JSGF, Snowball, and TADS 3 lexers +* Leaf Corcoran -- MoonScript lexer +* Christopher Creutzig -- MuPAD lexer +* Daniël W. Crompton -- Pike lexer +* Pete Curry -- bugfixes +* Bryan Davis -- EBNF lexer +* Bruno Deferrari -- Shen lexer +* Giedrius Dubinskas -- HTML formatter improvements +* Owen Durni -- Haxe lexer +* Alexander Dutton, Oxford University Computing Services -- SPARQL lexer +* James Edwards -- Terraform lexer +* Nick Efford -- Python 3 lexer +* Sven Efftinge -- Xtend lexer +* Artem Egorkine -- terminal256 formatter +* Matthew Fernandez -- CAmkES lexer +* Paweł Fertyk -- GDScript lexer, HTML formatter improvements +* Michael Ficarra -- CPSA lexer +* James H. Fisher -- PostScript lexer +* William S. Fulton -- SWIG lexer +* Carlos Galdino -- Elixir and Elixir Console lexers +* Michael Galloy -- IDL lexer +* Naveen Garg -- Autohotkey lexer +* Simon Garnotel -- FreeFem++ lexer +* Laurent Gautier -- R/S lexer +* Alex Gaynor -- PyPy log lexer +* Richard Gerkin -- Igor Pro lexer +* Alain Gilbert -- TypeScript lexer +* Alex Gilding -- BlitzBasic lexer +* GitHub, Inc -- DASM16, Augeas, TOML, and Slash lexers +* Bertrand Goetzmann -- Groovy lexer +* Krzysiek Goj -- Scala lexer +* Rostyslav Golda -- FloScript lexer +* Andrey Golovizin -- BibTeX lexers +* Matt Good -- Genshi, Cheetah lexers +* Michał Górny -- vim modeline support +* Alex Gosse -- TrafficScript lexer +* Patrick Gotthardt -- PHP namespaces support +* Olivier Guibe -- Asymptote lexer +* Phil Hagelberg -- Fennel lexer +* Florian Hahn -- Boogie lexer +* Martin Harriman -- SNOBOL lexer +* Matthew Harrison -- SVG formatter +* Steven Hazel -- Tcl lexer +* Dan Michael Heggø -- Turtle lexer +* Aslak Hellesøy -- Gherkin lexer +* Greg Hendershott -- Racket lexer +* Justin Hendrick -- ParaSail lexer +* Jordi Gutiérrez Hermoso -- Octave lexer +* David Hess, Fish Software, Inc. 
-- Objective-J lexer +* Varun Hiremath -- Debian control lexer +* Rob Hoelz -- Perl 6 lexer +* Doug Hogan -- Mscgen lexer +* Ben Hollis -- Mason lexer +* Max Horn -- GAP lexer +* Alastair Houghton -- Lexer inheritance facility +* Tim Howard -- BlitzMax lexer +* Dustin Howett -- Logos lexer +* Ivan Inozemtsev -- Fantom lexer +* Hiroaki Itoh -- Shell console rewrite, Lexers for PowerShell session, + MSDOS session, BC, WDiff +* Brian R. Jackson -- Tea lexer +* Christian Jann -- ShellSession lexer +* Dennis Kaarsemaker -- sources.list lexer +* Dmitri Kabak -- Inferno Limbo lexer +* Igor Kalnitsky -- vhdl lexer +* Colin Kennedy - USD lexer +* Alexander Kit -- MaskJS lexer +* Pekka Klärck -- Robot Framework lexer +* Gerwin Klein -- Isabelle lexer +* Eric Knibbe -- Lasso lexer +* Stepan Koltsov -- Clay lexer +* Adam Koprowski -- Opa lexer +* Benjamin Kowarsch -- Modula-2 lexer +* Domen Kožar -- Nix lexer +* Oleh Krekel -- Emacs Lisp lexer +* Alexander Kriegisch -- Kconfig and AspectJ lexers +* Marek Kubica -- Scheme lexer +* Jochen Kupperschmidt -- Markdown processor +* Gerd Kurzbach -- Modelica lexer +* Jon Larimer, Google Inc. -- Smali lexer +* Olov Lassus -- Dart lexer +* Matt Layman -- TAP lexer +* Kristian Lyngstøl -- Varnish lexers +* Sylvestre Ledru -- Scilab lexer +* Chee Sing Lee -- Flatline lexer +* Mark Lee -- Vala lexer +* Valentin Lorentz -- C++ lexer improvements +* Ben Mabey -- Gherkin lexer +* Angus MacArthur -- QML lexer +* Louis Mandel -- X10 lexer +* Louis Marchand -- Eiffel lexer +* Simone Margaritelli -- Hybris lexer +* Kirk McDonald -- D lexer +* Gordon McGregor -- SystemVerilog lexer +* Stephen McKamey -- Duel/JBST lexer +* Brian McKenna -- F# lexer +* Charles McLaughlin -- Puppet lexer +* Kurt McKee -- Tera Term macro lexer, PostgreSQL updates, MySQL overhaul +* Lukas Meuser -- BBCode formatter, Lua lexer +* Cat Miller -- Pig lexer +* Paul Miller -- LiveScript lexer +* Hong Minhee -- HTTP lexer +* Michael Mior -- Awk lexer +* Bruce Mitchener -- Dylan lexer rewrite +* Reuben Morais -- SourcePawn lexer +* Jon Morton -- Rust lexer +* Paulo Moura -- Logtalk lexer +* Mher Movsisyan -- DTD lexer +* Dejan Muhamedagic -- Crmsh lexer +* Ana Nelson -- Ragel, ANTLR, R console lexers +* Kurt Neufeld -- Markdown lexer +* Nam T. 
Nguyen -- Monokai style +* Jesper Noehr -- HTML formatter "anchorlinenos" +* Mike Nolta -- Julia lexer +* Avery Nortonsmith -- Pointless lexer +* Jonas Obrist -- BBCode lexer +* Edward O'Callaghan -- Cryptol lexer +* David Oliva -- Rebol lexer +* Pat Pannuto -- nesC lexer +* Jon Parise -- Protocol buffers and Thrift lexers +* Benjamin Peterson -- Test suite refactoring +* Ronny Pfannschmidt -- BBCode lexer +* Dominik Picheta -- Nimrod lexer +* Andrew Pinkham -- RTF Formatter Refactoring +* Clément Prévost -- UrbiScript lexer +* Tanner Prynn -- cmdline -x option and loading lexers from files +* Oleh Prypin -- Crystal lexer (based on Ruby lexer) +* Xidorn Quan -- Web IDL lexer +* Elias Rabel -- Fortran fixed form lexer +* raichoo -- Idris lexer +* Daniel Ramirez -- GDScript lexer +* Kashif Rasul -- CUDA lexer +* Nathan Reed -- HLSL lexer +* Justin Reidy -- MXML lexer +* Norman Richards -- JSON lexer +* Corey Richardson -- Rust lexer updates +* Lubomir Rintel -- GoodData MAQL and CL lexers +* Andre Roberge -- Tango style +* Georg Rollinger -- HSAIL lexer +* Michiel Roos -- TypoScript lexer +* Konrad Rudolph -- LaTeX formatter enhancements +* Mario Ruggier -- Evoque lexers +* Miikka Salminen -- Lovelace style, Hexdump lexer, lexer enhancements +* Stou Sandalski -- NumPy, FORTRAN, tcsh and XSLT lexers +* Matteo Sasso -- Common Lisp lexer +* Joe Schafer -- Ada lexer +* Max Schillinger -- TiddlyWiki5 lexer +* Ken Schutte -- Matlab lexers +* René Schwaiger -- Rainbow Dash style +* Sebastian Schweizer -- Whiley lexer +* Tassilo Schweyer -- Io, MOOCode lexers +* Pablo Seminario -- PromQL lexer +* Ted Shaw -- AutoIt lexer +* Joerg Sieker -- ABAP lexer +* Robert Simmons -- Standard ML lexer +* Kirill Simonov -- YAML lexer +* Corbin Simpson -- Monte lexer +* Alexander Smishlajev -- Visual FoxPro lexer +* Steve Spigarelli -- XQuery lexer +* Jerome St-Louis -- eC lexer +* Camil Staps -- Clean and NuSMV lexers; Solarized style +* James Strachan -- Kotlin lexer +* Tom Stuart -- Treetop lexer +* Colin Sullivan -- SuperCollider lexer +* Ben Swift -- Extempore lexer +* Edoardo Tenani -- Arduino lexer +* Tiberius Teng -- default style overhaul +* Jeremy Thurgood -- Erlang, Squid config lexers +* Brian Tiffin -- OpenCOBOL lexer +* Bob Tolbert -- Hy lexer +* Matthias Trute -- Forth lexer +* Erick Tryzelaar -- Felix lexer +* Alexander Udalov -- Kotlin lexer improvements +* Thomas Van Doren -- Chapel lexer +* Daniele Varrazzo -- PostgreSQL lexers +* Abe Voelker -- OpenEdge ABL lexer +* Pepijn de Vos -- HTML formatter CTags support +* Matthias Vallentin -- Bro lexer +* Benoît Vinot -- AMPL lexer +* Linh Vu Hong -- RSL lexer +* Nathan Weizenbaum -- Haml and Sass lexers +* Nathan Whetsell -- Csound lexers +* Dietmar Winkler -- Modelica lexer +* Nils Winter -- Smalltalk lexer +* Davy Wybiral -- Clojure lexer +* Whitney Young -- ObjectiveC lexer +* Diego Zamboni -- CFengine3 lexer +* Enrique Zamudio -- Ceylon lexer +* Alex Zimin -- Nemerle lexer +* Rob Zimmerman -- Kal lexer +* Vincent Zurczak -- Roboconf lexer +* Hubert Gruniaux -- C and C++ lexer improvements + +Many thanks for all contributions! diff --git a/CHANGES b/CHANGES old mode 100644 new mode 100755 index 47d378c..97701c1 --- a/CHANGES +++ b/CHANGES @@ -1,1457 +1,1520 @@ -Pygments changelog -================== - -Since 2.5.0, issue numbers refer to the tracker at -, -pull request numbers to the requests at -. - - -Version 2.6.1 -------------- -(released March 8, 2020) - -- This release fixes a packaging issue. No functional changes. 
- -Version 2.6 ------------ -(released March 8, 2020) - -- Running Pygments on Python 2.x is no longer supported. - (The Python 2 lexer still exists.) - -- Added lexers: - - * Linux kernel logs (PR#1310) - * LLVM MIR (PR#1361) - * MiniScript (PR#1397) - * Mosel (PR#1287, PR#1326) - * Parsing Expression Grammar (PR#1336) - * ReasonML (PR#1386) - * Ride (PR#1319, PR#1321) - * Sieve (PR#1257) - * USD (PR#1290) - * WebIDL (PR#1309) - -- Updated lexers: - - * Apache2 (PR#1378) - * Chapel (PR#1357) - * CSound (PR#1383) - * D (PR#1375, PR#1362) - * Idris (PR#1360) - * Perl6/Raku lexer (PR#1344) - * Python3 (PR#1382, PR#1385) - * Rust: Updated lexer to cover more builtins (mostly macros) and miscellaneous - new syntax (PR#1320) - * SQL: Add temporal support keywords (PR#1402) - -- The 256-color/true-color terminal formatters now support the italic attribute - in styles (PR#1288) -- Support HTTP 2/3 header (PR#1308) -- Support missing reason in HTTP header (PR#1322) -- Boogie/Silver: support line continuations and triggers, move contract keywords - to separate category (PR#1299) -- GAS: support C-style comments (PR#1291) -- Fix names in S lexer (PR#1330, PR#1333) -- Fix numeric literals in Ada (PR#1334) -- Recognize ``.mjs`` files as Javascript (PR#1392) -- Recognize ``.eex`` files as Elixir (PR#1387) -- Fix ``re.MULTILINE`` usage (PR#1388) -- Recognize ``pipenv`` and ``poetry`` dependency & lock files (PR#1376) -- Improve font search on Windows (#1247) -- Remove unused script block (#1401) - -Version 2.5.2 -------------- -(released November 29, 2019) - -- Fix incompatibility with some setuptools versions (PR#1316) - -- Fix lexing of ReST field lists (PR#1279) -- Fix lexing of Matlab keywords as field names (PR#1282) -- Recognize double-quoted strings in Matlab (PR#1278) -- Avoid slow backtracking in Vim lexer (PR#1312) -- Fix Scala highlighting of types (PR#1315) -- Highlight field lists more consistently in ReST (PR#1279) -- Fix highlighting Matlab keywords in field names (PR#1282) -- Recognize Matlab double quoted strings (PR#1278) -- Add some Terraform keywords -- Update Modelica lexer to 3.4 -- Update Crystal examples - - -Version 2.5.1 -------------- -(released November 26, 2019) - -- This release fixes a packaging issue. No functional changes. - - -Version 2.5.0 -------------- -(released November 26, 2019) - -- Added lexers: - - * Email (PR#1246) - * Erlang, Elixir shells (PR#823, #1521) - * Notmuch (PR#1264) - * `Scdoc `_ (PR#1268) - * `Solidity `_ (#1214) - * `Zeek `_ (new name for Bro) (PR#1269) - * `Zig `_ (PR#820) - -- Updated lexers: - - * Apache2 Configuration (PR#1251) - * Bash sessions (#1253) - * CSound (PR#1250) - * Dart - * Dockerfile - * Emacs Lisp - * Handlebars (PR#773) - * Java (#1101, #987) - * Logtalk (PR#1261) - * Matlab (PR#1271) - * Praat (PR#1277) - * Python3 (PR#1255, PR#1400) - * Ruby - * YAML (#1528) - * Velocity - -- Added styles: - - * Inkpot (PR#1276) - -- The ``PythonLexer`` class is now an alias for the former ``Python3Lexer``. - The old ``PythonLexer`` is available as ``Python2Lexer``. Same change has - been done for the ``PythonTracebackLexer``. The ``python3`` option for - the ``PythonConsoleLexer`` is now true by default. 
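-
-  A minimal sketch of the renamed classes (illustrative, not part of the
-  original entry)::
-
-      from pygments.lexers import PythonLexer, Python2Lexer
-
-      lexer3 = PythonLexer()    # now lexes Python 3 source
-      lexer2 = Python2Lexer()   # the former Python 2 PythonLexer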
- -- Bump ``NasmLexer`` priority over ``TasmLexer`` for ``.asm`` files - (fixes #1326) -- Default font in the ``ImageFormatter`` has been updated (#928, PR#1245) -- Test suite switched to py.test, removed nose dependency (#1490) -- Reduce ``TeraTerm`` lexer score -- it used to match nearly all languages - (#1256) -- Treat ``Skylark``/``Starlark`` files as Python files (PR#1259) -- Image formatter: actually respect ``line_number_separator`` option - -- Add LICENSE file to wheel builds -- Agda: fix lambda highlighting -- Dart: support ``@`` annotations -- Dockerfile: accept ``FROM ... AS`` syntax -- Emacs Lisp: add more string functions -- GAS: accept registers in directive arguments -- Java: make structural punctuation (braces, parens, colon, comma) ``Punctuation``, not ``Operator`` (#987) -- Java: support ``var`` contextual keyword (#1101) -- Matlab: Fix recognition of ``function`` keyword (PR#1271) -- Python: recognize ``.jy`` filenames (#976) -- Python: recognize ``f`` string prefix (#1156) -- Ruby: support squiggly heredocs -- Shell sessions: recognize Virtualenv prompt (PR#1266) -- Velocity: support silent reference syntax - - -Version 2.4.2 -------------- -(released May 28, 2019) - -- Fix encoding error when guessing lexer with given ``encoding`` option - (#1438) - - -Version 2.4.1 -------------- -(released May 24, 2019) - -- Updated lexers: - - * Coq (#1430) - * MSDOS Session (PR#734) - * NASM (#1517) - * Objective-C (PR#813, #1508) - * Prolog (#1511) - * TypeScript (#1515) - -- Support CSS variables in stylesheets (PR#814, #1356) -- Fix F# lexer name (PR#709) -- Fix ``TerminalFormatter`` using bold for bright text (#1480) - - -Version 2.4.0 -------------- -(released May 8, 2019) - -- Added lexers: - - * Augeas (PR#807) - * BBC Basic (PR#806) - * Boa (PR#756) - * Charm++ CI (PR#788) - * DASM16 (PR#807) - * FloScript (PR#750) - * FreeFem++ (PR#785) - * Hspec (PR#790) - * Pony (PR#627) - * SGF (PR#780) - * Slash (PR#807) - * Slurm (PR#760) - * Tera Term Language (PR#749) - * TOML (PR#807) - * Unicon (PR#731) - * VBScript (PR#673) - -- Updated lexers: - - * Apache2 (PR#766) - * Cypher (PR#746) - * LLVM (PR#792) - * Makefiles (PR#766) - * PHP (#1482) - * Rust - * SQL (PR#672) - * Stan (PR#774) - * Stata (PR#800) - * Terraform (PR#787) - * YAML - -- Add solarized style (PR#708) -- Add support for Markdown reference-style links (PR#753) -- Add license information to generated HTML/CSS files (#1496) -- Change ANSI color names (PR#777) -- Fix catastrophic backtracking in the bash lexer (#1494) -- Fix documentation failing to build using Sphinx 2.0 (#1501) -- Fix incorrect links in the Lisp and R lexer documentation (PR#775) -- Fix rare unicode errors on Python 2.7 (PR#798, #1492) -- Fix lexers popping from an empty stack (#1506) -- TypoScript uses ``.typoscript`` now (#1498) -- Updated Trove classifiers and ``pip`` requirements (PR#799) - - - -Version 2.3.1 -------------- -(released Dec 16, 2018) - -- Updated lexers: - - * ASM (PR#784) - * Chapel (PR#735) - * Clean (PR#621) - * CSound (PR#684) - * Elm (PR#744) - * Fortran (PR#747) - * GLSL (PR#740) - * Haskell (PR#745) - * Hy (PR#754) - * Igor Pro (PR#764) - * PowerShell (PR#705) - * Python (PR#720, #1299, PR#715) - * SLexer (PR#680) - * YAML (PR#762, PR#724) - -- Fix invalid string escape sequences -- Fix `FutureWarning` introduced by regex changes in Python 3.7 - - -Version 2.3.0 -------------- -(released Nov 25, 2018) - -- Added lexers: - - * Fennel (PR#783) - * HLSL (PR#675) - -- Updated lexers: - - * Dockerfile (PR#714) - -- Minimum 
Python versions changed to 2.7 and 3.5 -- Added support for Python 3.7 generator changes (PR#772) -- Fix incorrect token type in SCSS for single-quote strings (#1322) -- Use `terminal256` formatter if `TERM` contains `256` (PR#666) -- Fix incorrect handling of GitHub style fences in Markdown (PR#741, #1389) -- Fix `%a` not being highlighted in Python3 strings (PR#727) - - -Version 2.2.0 -------------- -(released Jan 22, 2017) - -- Added lexers: - - * AMPL - * TypoScript (#1173) - * Varnish config (PR#554) - * Clean (PR#503) - * WDiff (PR#513) - * Flatline (PR#551) - * Silver (PR#537) - * HSAIL (PR#518) - * JSGF (PR#546) - * NCAR command language (PR#536) - * Extempore (PR#530) - * Cap'n Proto (PR#595) - * Whiley (PR#573) - * Monte (PR#592) - * Crystal (PR#576) - * Snowball (PR#589) - * CapDL (PR#579) - * NuSMV (PR#564) - * SAS, Stata (PR#593) - -- Added the ability to load lexer and formatter classes directly from files - with the `-x` command line option and the `lexers.load_lexer_from_file()` - and `formatters.load_formatter_from_file()` functions. (PR#559) - -- Added `lexers.find_lexer_class_by_name()`. (#1203) - -- Added new token types and lexing for magic methods and variables in Python - and PHP. - -- Added a new token type for string affixes and lexing for them in Python, C++ - and Postgresql lexers. - -- Added a new token type for heredoc (and similar) string delimiters and - lexing for them in C++, Perl, PHP, Postgresql and Ruby lexers. - -- Styles can now define colors with ANSI colors for use in the 256-color - terminal formatter. (PR#531) - -- Improved the CSS lexer. (#1083, #1130) - -- Added "Rainbow Dash" style. (PR#623) - -- Delay loading `pkg_resources`, which takes a long while to import. (PR#690) - - -Version 2.1.3 -------------- -(released Mar 2, 2016) - -- Fixed regression in Bash lexer (PR#563) - - -Version 2.1.2 -------------- -(released Feb 29, 2016) - -- Fixed Python 3 regression in image formatter (#1215) -- Fixed regression in Bash lexer (PR#562) - - -Version 2.1.1 -------------- -(relased Feb 14, 2016) - -- Fixed Jython compatibility (#1205) -- Fixed HTML formatter output with leading empty lines (#1111) -- Added a mapping table for LaTeX encodings and added utf8 (#1152) -- Fixed image formatter font searching on Macs (#1188) -- Fixed deepcopy-ing of Token instances (#1168) -- Fixed Julia string interpolation (#1170) -- Fixed statefulness of HttpLexer between get_tokens calls -- Many smaller fixes to various lexers - - -Version 2.1 ------------ -(released Jan 17, 2016) - -- Added lexers: - - * Emacs Lisp (PR#431) - * Arduino (PR#442) - * Modula-2 with multi-dialect support (#1090) - * Fortran fixed format (PR#213) - * Archetype Definition language (PR#483) - * Terraform (PR#432) - * Jcl, Easytrieve (PR#208) - * ParaSail (PR#381) - * Boogie (PR#420) - * Turtle (PR#425) - * Fish Shell (PR#422) - * Roboconf (PR#449) - * Test Anything Protocol (PR#428) - * Shen (PR#385) - * Component Pascal (PR#437) - * SuperCollider (PR#472) - * Shell consoles (Tcsh, PowerShell, MSDOS) (PR#479) - * Elm and J (PR#452) - * Crmsh (PR#440) - * Praat (PR#492) - * CSound (PR#494) - * Ezhil (PR#443) - * Thrift (PR#469) - * QVT Operational (PR#204) - * Hexdump (PR#508) - * CAmkES Configuration (PR#462) - -- Added styles: - - * Lovelace (PR#456) - * Algol and Algol-nu (#1090) - -- Added formatters: - - * IRC (PR#458) - * True color (24-bit) terminal ANSI sequences (#1142) - (formatter alias: "16m") - -- New "filename" option for HTML formatter (PR#527). 
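-
-  A minimal sketch of the new option (hypothetical usage)::
-
-      from pygments.formatters import HtmlFormatter
-
-      # Renders a filename heading above the highlighted block.
-      fmt = HtmlFormatter(filename='example.py')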
- -- Improved performance of the HTML formatter for long lines (PR#504). - -- Updated autopygmentize script (PR#445). - -- Fixed style inheritance for non-standard token types in HTML output. - -- Added support for async/await to Python 3 lexer. - -- Rewrote linenos option for TerminalFormatter (it's better, but slightly - different output than before) (#1147). - -- Javascript lexer now supports most of ES6 (#1100). - -- Cocoa builtins updated for iOS 8.1 (PR#433). - -- Combined BashSessionLexer and ShellSessionLexer, new version should support - the prompt styles of either. - -- Added option to pygmentize to show a full traceback on exceptions. - -- Fixed incomplete output on Windows and Python 3 (e.g. when using iPython - Notebook) (#1153). - -- Allowed more traceback styles in Python console lexer (PR#253). - -- Added decorators to TypeScript (PR#509). - -- Fix highlighting of certain IRC logs formats (#1076). - - -Version 2.0.2 -------------- -(released Jan 20, 2015) - -- Fix Python tracebacks getting duplicated in the console lexer (#1068). - -- Backquote-delimited identifiers are now recognized in F# (#1062). - - -Version 2.0.1 -------------- -(released Nov 10, 2014) - -- Fix an encoding issue when using ``pygmentize`` with the ``-o`` option. - - -Version 2.0 ------------ -(released Nov 9, 2014) - -- Default lexer encoding is now "guess", i.e. UTF-8 / Locale / Latin1 is - tried in that order. - -- Major update to Swift lexer (PR#410). - -- Multiple fixes to lexer guessing in conflicting cases: - - * recognize HTML5 by doctype - * recognize XML by XML declaration - * don't recognize C/C++ as SystemVerilog - -- Simplified regexes and builtin lists. - - -Version 2.0rc1 --------------- -(released Oct 16, 2014) - -- Dropped Python 2.4 and 2.5 compatibility. This is in favor of single-source - compatibility between Python 2.6, 2.7 and 3.3+. - -- New website and documentation based on Sphinx (finally!) - -- Lexers added: - - * APL (#969) - * Agda and Literate Agda (PR#203) - * Alloy (PR#355) - * AmbientTalk - * BlitzBasic (PR#197) - * ChaiScript (PR#24) - * Chapel (PR#256) - * Cirru (PR#275) - * Clay (PR#184) - * ColdFusion CFC (PR#283) - * Cryptol and Literate Cryptol (PR#344) - * Cypher (PR#257) - * Docker config files - * EBNF (PR#193) - * Eiffel (PR#273) - * GAP (PR#311) - * Golo (PR#309) - * Handlebars (PR#186) - * Hy (PR#238) - * Idris and Literate Idris (PR#210) - * Igor Pro (PR#172) - * Inform 6/7 (PR#281) - * Intel objdump (PR#279) - * Isabelle (PR#386) - * Jasmin (PR#349) - * JSON-LD (PR#289) - * Kal (PR#233) - * Lean (PR#399) - * LSL (PR#296) - * Limbo (PR#291) - * Liquid (#977) - * MQL (PR#285) - * MaskJS (PR#280) - * Mozilla preprocessors - * Mathematica (PR#245) - * NesC (PR#166) - * Nit (PR#375) - * Nix (PR#267) - * Pan - * Pawn (PR#211) - * Perl 6 (PR#181) - * Pig (PR#304) - * Pike (PR#237) - * QBasic (PR#182) - * Red (PR#341) - * ResourceBundle (#1038) - * Rexx (PR#199) - * Rql (PR#251) - * Rsl - * SPARQL (PR#78) - * Slim (PR#366) - * Swift (PR#371) - * Swig (PR#168) - * TADS 3 (PR#407) - * Todo.txt todo lists - * Twig (PR#404) - -- Added a helper to "optimize" regular expressions that match one of many - literal words; this can save 20% and more lexing time with lexers that - highlight many keywords or builtins. - -- New styles: "xcode" and "igor", similar to the default highlighting of - the respective IDEs. - -- The command-line "pygmentize" tool now tries a little harder to find the - correct encoding for files and the terminal (#979). 
- -- Added "inencoding" option for lexers to override "encoding" analogous - to "outencoding" (#800). - -- Added line-by-line "streaming" mode for pygmentize with the "-s" option. - (PR#165) Only fully works for lexers that have no constructs spanning - lines! - -- Added an "envname" option to the LaTeX formatter to select a replacement - verbatim environment (PR#235). - -- Updated the Makefile lexer to yield a little more useful highlighting. - -- Lexer aliases passed to ``get_lexer_by_name()`` are now case-insensitive. - -- File name matching in lexers and formatters will now use a regex cache - for speed (PR#205). - -- Pygments will now recognize "vim" modelines when guessing the lexer for - a file based on content (PR#118). - -- Major restructure of the ``pygments.lexers`` module namespace. There are now - many more modules with less lexers per module. Old modules are still around - and re-export the lexers they previously contained. - -- The NameHighlightFilter now works with any Name.* token type (#790). - -- Python 3 lexer: add new exceptions from PEP 3151. - -- Opa lexer: add new keywords (PR#170). - -- Julia lexer: add keywords and underscore-separated number - literals (PR#176). - -- Lasso lexer: fix method highlighting, update builtins. Fix - guessing so that plain XML isn't always taken as Lasso (PR#163). - -- Objective C/C++ lexers: allow "@" prefixing any expression (#871). - -- Ruby lexer: fix lexing of Name::Space tokens (#860) and of symbols - in hashes (#873). - -- Stan lexer: update for version 2.4.0 of the language (PR#162, PR#255, PR#377). - -- JavaScript lexer: add the "yield" keyword (PR#196). - -- HTTP lexer: support for PATCH method (PR#190). - -- Koka lexer: update to newest language spec (PR#201). - -- Haxe lexer: rewrite and support for Haxe 3 (PR#174). - -- Prolog lexer: add different kinds of numeric literals (#864). - -- F# lexer: rewrite with newest spec for F# 3.0 (#842), fix a bug with - dotted chains (#948). - -- Kotlin lexer: general update (PR#271). - -- Rebol lexer: fix comment detection and analyse_text (PR#261). - -- LLVM lexer: update keywords to v3.4 (PR#258). - -- PHP lexer: add new keywords and binary literals (PR#222). - -- external/markdown-processor.py updated to newest python-markdown (PR#221). - -- CSS lexer: some highlighting order fixes (PR#231). - -- Ceylon lexer: fix parsing of nested multiline comments (#915). - -- C family lexers: fix parsing of indented preprocessor directives (#944). - -- Rust lexer: update to 0.9 language version (PR#270, PR#388). - -- Elixir lexer: update to 0.15 language version (PR#392). - -- Fix swallowing incomplete tracebacks in Python console lexer (#874). - - -Version 1.6 ------------ -(released Feb 3, 2013) - -- Lexers added: - - * Dylan console (PR#149) - * Logos (PR#150) - * Shell sessions (PR#158) - -- Fix guessed lexers not receiving lexer options (#838). - -- Fix unquoted HTML attribute lexing in Opa (#841). - -- Fixes to the Dart lexer (PR#160). 
- - -Version 1.6rc1 --------------- -(released Jan 9, 2013) - -- Lexers added: - - * AspectJ (PR#90) - * AutoIt (PR#122) - * BUGS-like languages (PR#89) - * Ceylon (PR#86) - * Croc (new name for MiniD) - * CUDA (PR#75) - * Dg (PR#116) - * IDL (PR#115) - * Jags (PR#89) - * Julia (PR#61) - * Kconfig (#711) - * Lasso (PR#95, PR#113) - * LiveScript (PR#84) - * Monkey (PR#117) - * Mscgen (PR#80) - * NSIS scripts (PR#136) - * OpenCOBOL (PR#72) - * QML (PR#123) - * Puppet (PR#133) - * Racket (PR#94) - * Rdoc (PR#99) - * Robot Framework (PR#137) - * RPM spec files (PR#124) - * Rust (PR#67) - * Smali (Dalvik assembly) - * SourcePawn (PR#39) - * Stan (PR#89) - * Treetop (PR#125) - * TypeScript (PR#114) - * VGL (PR#12) - * Visual FoxPro (#762) - * Windows Registry (#819) - * Xtend (PR#68) - -- The HTML formatter now supports linking to tags using CTags files, when the - python-ctags package is installed (PR#87). - -- The HTML formatter now has a "linespans" option that wraps every line in a - tag with a specific id (PR#82). - -- When deriving a lexer from another lexer with token definitions, definitions - for states not in the child lexer are now inherited. If you override a state - in the child lexer, an "inherit" keyword has been added to insert the base - state at that position (PR#141). - -- The C family lexers now inherit token definitions from a common base class, - removing code duplication (PR#141). - -- Use "colorama" on Windows for console color output (PR#142). - -- Fix Template Haskell highlighting (PR#63). - -- Fix some S/R lexer errors (PR#91). - -- Fix a bug in the Prolog lexer with names that start with 'is' (#810). - -- Rewrite Dylan lexer, add Dylan LID lexer (PR#147). - -- Add a Java quickstart document (PR#146). - -- Add a "external/autopygmentize" file that can be used as .lessfilter (#802). - - -Version 1.5 ------------ -(codename Zeitdilatation, released Mar 10, 2012) - -- Lexers added: - - * Awk (#630) - * Fancy (#633) - * PyPy Log - * eC - * Nimrod - * Nemerle (#667) - * F# (#353) - * Groovy (#501) - * PostgreSQL (#660) - * DTD - * Gosu (#634) - * Octave (PR#22) - * Standard ML (PR#14) - * CFengine3 (#601) - * Opa (PR#37) - * HTTP sessions (PR#42) - * JSON (PR#31) - * SNOBOL (PR#30) - * MoonScript (PR#43) - * ECL (PR#29) - * Urbiscript (PR#17) - * OpenEdge ABL (PR#27) - * SystemVerilog (PR#35) - * Coq (#734) - * PowerShell (#654) - * Dart (#715) - * Fantom (PR#36) - * Bro (PR#5) - * NewLISP (PR#26) - * VHDL (PR#45) - * Scilab (#740) - * Elixir (PR#57) - * Tea (PR#56) - * Kotlin (PR#58) - -- Fix Python 3 terminal highlighting with pygmentize (#691). - -- In the LaTeX formatter, escape special &, < and > chars (#648). - -- In the LaTeX formatter, fix display problems for styles with token - background colors (#670). - -- Enhancements to the Squid conf lexer (#664). - -- Several fixes to the reStructuredText lexer (#636). - -- Recognize methods in the ObjC lexer (#638). - -- Fix Lua "class" highlighting: it does not have classes (#665). - -- Fix degenerate regex in Scala lexer (#671) and highlighting bugs (#713, 708). - -- Fix number pattern order in Ocaml lexer (#647). - -- Fix generic type highlighting in ActionScript 3 (#666). - -- Fixes to the Clojure lexer (PR#9). - -- Fix degenerate regex in Nemerle lexer (#706). - -- Fix infinite looping in CoffeeScript lexer (#729). - -- Fix crashes and analysis with ObjectiveC lexer (#693, #696). - -- Add some Fortran 2003 keywords. - -- Fix Boo string regexes (#679). - -- Add "rrt" style (#727). 
- -- Fix infinite looping in Darcs Patch lexer. - -- Lots of misc fixes to character-eating bugs and ordering problems in many - different lexers. - - -Version 1.4 ------------ -(codename Unschärfe, released Jan 03, 2011) - -- Lexers added: - - * Factor (#520) - * PostScript (#486) - * Verilog (#491) - * BlitzMax Basic (#478) - * Ioke (#465) - * Java properties, split out of the INI lexer (#445) - * Scss (#509) - * Duel/JBST - * XQuery (#617) - * Mason (#615) - * GoodData (#609) - * SSP (#473) - * Autohotkey (#417) - * Google Protocol Buffers - * Hybris (#506) - -- Do not fail in analyse_text methods (#618). - -- Performance improvements in the HTML formatter (#523). - -- With the ``noclasses`` option in the HTML formatter, some styles - present in the stylesheet were not added as inline styles. - -- Four fixes to the Lua lexer (#480, #481, #482, #497). - -- More context-sensitive Gherkin lexer with support for more i18n translations. - -- Support new OO keywords in Matlab lexer (#521). - -- Small fix in the CoffeeScript lexer (#519). - -- A bugfix for backslashes in ocaml strings (#499). - -- Fix unicode/raw docstrings in the Python lexer (#489). - -- Allow PIL to work without PIL.pth (#502). - -- Allow seconds as a unit in CSS (#496). - -- Support ``application/javascript`` as a JavaScript mime type (#504). - -- Support `Offload `_ C++ Extensions as - keywords in the C++ lexer (#484). - -- Escape more characters in LaTeX output (#505). - -- Update Haml/Sass lexers to version 3 (#509). - -- Small PHP lexer string escaping fix (#515). - -- Support comments before preprocessor directives, and unsigned/ - long long literals in C/C++ (#613, #616). - -- Support line continuations in the INI lexer (#494). - -- Fix lexing of Dylan string and char literals (#628). - -- Fix class/procedure name highlighting in VB.NET lexer (#624). - - -Version 1.3.1 -------------- -(bugfix release, released Mar 05, 2010) - -- The ``pygmentize`` script was missing from the distribution. - - -Version 1.3 ------------ -(codename Schneeglöckchen, released Mar 01, 2010) - -- Added the ``ensurenl`` lexer option, which can be used to suppress the - automatic addition of a newline to the lexer input. - -- Lexers added: - - * Ada - * Coldfusion - * Modula-2 - * Haxe - * R console - * Objective-J - * Haml and Sass - * CoffeeScript - -- Enhanced reStructuredText highlighting. - -- Added support for PHP 5.3 namespaces in the PHP lexer. - -- Added a bash completion script for `pygmentize`, to the external/ - directory (#466). - -- Fixed a bug in `do_insertions()` used for multi-lexer languages. - -- Fixed a Ruby regex highlighting bug (#476). - -- Fixed regex highlighting bugs in Perl lexer (#258). - -- Add small enhancements to the C lexer (#467) and Bash lexer (#469). - -- Small fixes for the Tcl, Debian control file, Nginx config, - Smalltalk, Objective-C, Clojure, Lua lexers. - -- Gherkin lexer: Fixed single apostrophe bug and added new i18n keywords. - - -Version 1.2.2 -------------- -(bugfix release, released Jan 02, 2010) - -* Removed a backwards incompatibility in the LaTeX formatter that caused - Sphinx to produce invalid commands when writing LaTeX output (#463). - -* Fixed a forever-backtracking regex in the BashLexer (#462). - - -Version 1.2.1 -------------- -(bugfix release, released Jan 02, 2010) - -* Fixed mishandling of an ellipsis in place of the frames in a Python - console traceback, resulting in clobbered output. 
- - -Version 1.2 ------------ -(codename Neujahr, released Jan 01, 2010) - -- Dropped Python 2.3 compatibility. - -- Lexers added: - - * Asymptote - * Go - * Gherkin (Cucumber) - * CMake - * Ooc - * Coldfusion - * Haxe - * R console - -- Added options for rendering LaTeX in source code comments in the - LaTeX formatter (#461). - -- Updated the Logtalk lexer. - -- Added `line_number_start` option to image formatter (#456). - -- Added `hl_lines` and `hl_color` options to image formatter (#457). - -- Fixed the HtmlFormatter's handling of noclasses=True to not output any - classes (#427). - -- Added the Monokai style (#453). - -- Fixed LLVM lexer identifier syntax and added new keywords (#442). - -- Fixed the PythonTracebackLexer to handle non-traceback data in header or - trailer, and support more partial tracebacks that start on line 2 (#437). - -- Fixed the CLexer to not highlight ternary statements as labels. - -- Fixed lexing of some Ruby quoting peculiarities (#460). - -- A few ASM lexer fixes (#450). - - -Version 1.1.1 -------------- -(bugfix release, released Sep 15, 2009) - -- Fixed the BBCode lexer (#435). - -- Added support for new Jinja2 keywords. - -- Fixed test suite failures. - -- Added Gentoo-specific suffixes to Bash lexer. - - -Version 1.1 ------------ -(codename Brillouin, released Sep 11, 2009) - -- Ported Pygments to Python 3. This needed a few changes in the way - encodings are handled; they may affect corner cases when used with - Python 2 as well. - -- Lexers added: - - * Antlr/Ragel, thanks to Ana Nelson - * (Ba)sh shell - * Erlang shell - * GLSL - * Prolog - * Evoque - * Modelica - * Rebol - * MXML - * Cython - * ABAP - * ASP.net (VB/C#) - * Vala - * Newspeak - -- Fixed the LaTeX formatter's output so that output generated for one style - can be used with the style definitions of another (#384). - -- Added "anchorlinenos" and "noclobber_cssfile" (#396) options to HTML - formatter. - -- Support multiline strings in Lua lexer. - -- Rewrite of the JavaScript lexer by Pumbaa80 to better support regular - expression literals (#403). - -- When pygmentize is asked to highlight a file for which multiple lexers - match the filename, use the analyse_text guessing engine to determine the - winner (#355). - -- Fixed minor bugs in the JavaScript lexer (#383), the Matlab lexer (#378), - the Scala lexer (#392), the INI lexer (#391), the Clojure lexer (#387) - and the AS3 lexer (#389). - -- Fixed three Perl heredoc lexing bugs (#379, #400, #422). - -- Fixed a bug in the image formatter which misdetected lines (#380). - -- Fixed bugs lexing extended Ruby strings and regexes. - -- Fixed a bug when lexing git diffs. - -- Fixed a bug lexing the empty commit in the PHP lexer (#405). - -- Fixed a bug causing Python numbers to be mishighlighted as floats (#397). - -- Fixed a bug when backslashes are used in odd locations in Python (#395). - -- Fixed various bugs in Matlab and S-Plus lexers, thanks to Winston Chang (#410, - #411, #413, #414) and fmarc (#419). - -- Fixed a bug in Haskell single-line comment detection (#426). - -- Added new-style reStructuredText directive for docutils 0.5+ (#428). - - -Version 1.0 ------------ -(codename Dreiundzwanzig, released Nov 23, 2008) - -- Don't use join(splitlines()) when converting newlines to ``\n``, - because that doesn't keep all newlines at the end when the - ``stripnl`` lexer option is False. - -- Added ``-N`` option to command-line interface to get a lexer name - for a given filename. 
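-
-  For example (hypothetical invocation)::
-
-      pygmentize -N setup.py    # prints "python"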
- -- Added Tango style, written by Andre Roberge for the Crunchy project. - -- Added Python3TracebackLexer and ``python3`` option to - PythonConsoleLexer. - -- Fixed a few bugs in the Haskell lexer. - -- Fixed PythonTracebackLexer to be able to recognize SyntaxError and - KeyboardInterrupt (#360). - -- Provide one formatter class per image format, so that surprises like:: - - pygmentize -f gif -o foo.gif foo.py - - creating a PNG file are avoided. - -- Actually use the `font_size` option of the image formatter. - -- Fixed numpy lexer that it doesn't listen for `*.py` any longer. - -- Fixed HTML formatter so that text options can be Unicode - strings (#371). - -- Unified Diff lexer supports the "udiff" alias now. - -- Fixed a few issues in Scala lexer (#367). - -- RubyConsoleLexer now supports simple prompt mode (#363). - -- JavascriptLexer is smarter about what constitutes a regex (#356). - -- Add Applescript lexer, thanks to Andreas Amann (#330). - -- Make the codetags more strict about matching words (#368). - -- NginxConfLexer is a little more accurate on mimetypes and - variables (#370). - - -Version 0.11.1 --------------- -(released Aug 24, 2008) - -- Fixed a Jython compatibility issue in pygments.unistring (#358). - - -Version 0.11 ------------- -(codename Straußenei, released Aug 23, 2008) - -Many thanks go to Tim Hatch for writing or integrating most of the bug -fixes and new features. - -- Lexers added: - - * Nasm-style assembly language, thanks to delroth - * YAML, thanks to Kirill Simonov - * ActionScript 3, thanks to Pierre Bourdon - * Cheetah/Spitfire templates, thanks to Matt Good - * Lighttpd config files - * Nginx config files - * Gnuplot plotting scripts - * Clojure - * POV-Ray scene files - * Sqlite3 interactive console sessions - * Scala source files, thanks to Krzysiek Goj - -- Lexers improved: - - * C lexer highlights standard library functions now and supports C99 - types. - * Bash lexer now correctly highlights heredocs without preceding - whitespace. - * Vim lexer now highlights hex colors properly and knows a couple - more keywords. - * Irc logs lexer now handles xchat's default time format (#340) and - correctly highlights lines ending in ``>``. - * Support more delimiters for perl regular expressions (#258). - * ObjectiveC lexer now supports 2.0 features. - -- Added "Visual Studio" style. - -- Updated markdown processor to Markdown 1.7. - -- Support roman/sans/mono style defs and use them in the LaTeX - formatter. - -- The RawTokenFormatter is no longer registered to ``*.raw`` and it's - documented that tokenization with this lexer may raise exceptions. - -- New option ``hl_lines`` to HTML formatter, to highlight certain - lines. - -- New option ``prestyles`` to HTML formatter. - -- New option *-g* to pygmentize, to allow lexer guessing based on - filetext (can be slowish, so file extensions are still checked - first). - -- ``guess_lexer()`` now makes its decision much faster due to a cache - of whether data is xml-like (a check which is used in several - versions of ``analyse_text()``. Several lexers also have more - accurate ``analyse_text()`` now. 
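-
-  A minimal sketch of content-based guessing (hypothetical input)::
-
-      from pygments.lexers import guess_lexer
-
-      lexer = guess_lexer('<html><body>hi</body></html>')
-      print(lexer.name)    # an XML-like lexer, e.g. HTML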
- - -Version 0.10 ------------- -(codename Malzeug, released May 06, 2008) - -- Lexers added: - - * Io - * Smalltalk - * Darcs patches - * Tcl - * Matlab - * Matlab sessions - * FORTRAN - * XSLT - * tcsh - * NumPy - * Python 3 - * S, S-plus, R statistics languages - * Logtalk - -- In the LatexFormatter, the *commandprefix* option is now by default - 'PY' instead of 'C', since the latter resulted in several collisions - with other packages. Also, the special meaning of the *arg* - argument to ``get_style_defs()`` was removed. - -- Added ImageFormatter, to format code as PNG, JPG, GIF or BMP. - (Needs the Python Imaging Library.) - -- Support doc comments in the PHP lexer. - -- Handle format specifications in the Perl lexer. - -- Fix comment handling in the Batch lexer. - -- Add more file name extensions for the C++, INI and XML lexers. - -- Fixes in the IRC and MuPad lexers. - -- Fix function and interface name highlighting in the Java lexer. - -- Fix at-rule handling in the CSS lexer. - -- Handle KeyboardInterrupts gracefully in pygmentize. - -- Added BlackWhiteStyle. - -- Bash lexer now correctly highlights math, does not require - whitespace after semicolons, and correctly highlights boolean - operators. - -- Makefile lexer is now capable of handling BSD and GNU make syntax. - - -Version 0.9 ------------ -(codename Herbstzeitlose, released Oct 14, 2007) - -- Lexers added: - - * Erlang - * ActionScript - * Literate Haskell - * Common Lisp - * Various assembly languages - * Gettext catalogs - * Squid configuration - * Debian control files - * MySQL-style SQL - * MOOCode - -- Lexers improved: - - * Greatly improved the Haskell and OCaml lexers. - * Improved the Bash lexer's handling of nested constructs. - * The C# and Java lexers exhibited abysmal performance with some - input code; this should now be fixed. - * The IRC logs lexer is now able to colorize weechat logs too. - * The Lua lexer now recognizes multi-line comments. - * Fixed bugs in the D and MiniD lexer. - -- The encoding handling of the command line mode (pygmentize) was - enhanced. You shouldn't get UnicodeErrors from it anymore if you - don't give an encoding option. - -- Added a ``-P`` option to the command line mode which can be used to - give options whose values contain commas or equals signs. - -- Added 256-color terminal formatter. - -- Added an experimental SVG formatter. - -- Added the ``lineanchors`` option to the HTML formatter, thanks to - Ian Charnas for the idea. - -- Gave the line numbers table a CSS class in the HTML formatter. - -- Added a Vim 7-like style. - - -Version 0.8.1 -------------- -(released Jun 27, 2007) - -- Fixed POD highlighting in the Ruby lexer. - -- Fixed Unicode class and namespace name highlighting in the C# lexer. - -- Fixed Unicode string prefix highlighting in the Python lexer. - -- Fixed a bug in the D and MiniD lexers. - -- Fixed the included MoinMoin parser. - - -Version 0.8 ------------ -(codename Maikäfer, released May 30, 2007) - -- Lexers added: - - * Haskell, thanks to Adam Blinkinsop - * Redcode, thanks to Adam Blinkinsop - * D, thanks to Kirk McDonald - * MuPad, thanks to Christopher Creutzig - * MiniD, thanks to Jarrett Billingsley - * Vim Script, by Tim Hatch - -- The HTML formatter now has a second line-numbers mode in which it - will just integrate the numbers in the same ``
<pre>`` tag as the
-  code.
-
-- The `CSharpLexer` is now Unicode-aware, which means that it has an
-  option that can be set so that it correctly lexes Unicode
-  identifiers allowed by the C# specs.
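-
-  A minimal sketch (assuming the option is called ``unicodelevel``, as in
-  current Pygments)::
-
-      from pygments.lexers import CSharpLexer
-
-      lexer = CSharpLexer(unicodelevel='full')  # 'none', 'basic' or 'full'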
-
-- Added a `RaiseOnErrorTokenFilter` that raises an exception when the
-  lexer generates an error token, and a `VisibleWhitespaceFilter` that
-  converts whitespace (spaces, tabs, newlines) into visible
-  characters.
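-
-  A minimal sketch of attaching both filters (assuming the registered
-  alias ``raiseonerror`` and the documented options)::
-
-      from pygments.lexers import PythonLexer
-      from pygments.filters import VisibleWhitespaceFilter
-
-      lexer = PythonLexer()
-      lexer.add_filter('raiseonerror')
-      lexer.add_filter(VisibleWhitespaceFilter(spaces=True, tabs=True))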
-
-- Fixed the `do_insertions()` helper function to yield correct
-  indices.
-
-- The ReST lexer now automatically highlights source code blocks in
-  ".. sourcecode:: language" and ".. code:: language" directive
-  blocks.
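-
-  For example, such a block looks like::
-
-      .. sourcecode:: python
-
-         print("highlighted as Python")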
-
-- Improved the default style (thanks to Tiberius Teng). The old
-  default is still available as the "emacs" style (which was an alias
-  before).
-
-- The `get_style_defs` method of HTML formatters now uses the
-  `cssclass` option as the default selector if it was given.
-
-- Improved the ReST and Bash lexers a bit.
-
-- Fixed a few bugs in the Makefile and Bash lexers, thanks to Tim
-  Hatch.
-
-- Fixed a bug in the command line code that disallowed ``-O`` options
-  when using the ``-S`` option.
-
-- Fixed a bug in the `RawTokenFormatter`.
-
-
-Version 0.7.1
--------------
-(released Feb 15, 2007)
-
-- Fixed little highlighting bugs in the Python, Java, Scheme and
-  Apache Config lexers.
-
-- Updated the included manpage.
-
-- Included a built version of the documentation in the source tarball.
-
-
-Version 0.7
------------
-(codename Faschingskrapfn, released Feb 14, 2007)
-
-- Added a MoinMoin parser that uses Pygments. With it, you get
-  Pygments highlighting in Moin Wiki pages.
-
-- Changed the exception raised if no suitable lexer, formatter etc. is
-  found in one of the `get_*_by_*` functions to a custom exception,
-  `pygments.util.ClassNotFound`. It is, however, a subclass of
-  `ValueError` in order to retain backwards compatibility.
-
-- Added a `-H` command line option which can be used to get the
-  docstring of a lexer, formatter or filter.
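-
-  For example (hypothetical invocations, using today's CLI syntax)::
-
-      pygmentize -H lexer python
-      pygmentize -H formatter html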
-
-- Made the handling of lexers and formatters more consistent. The
-  aliases and filename patterns of formatters are now attributes on
-  them.
-
-- Added an OCaml lexer, thanks to Adam Blinkinsop.
-
-- Made the HTML formatter more flexible, and easily subclassable in
-  order to make it easy to implement custom wrappers, e.g. alternate
-  line number markup. See the documentation.
-
-- Added an `outencoding` option to all formatters, making it possible
-  to override the `encoding` (which is used by lexers and formatters)
-  when using the command line interface. Also, if using the terminal
-  formatter and the output file is a terminal and has an encoding
-  attribute, use it if no encoding is given.
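-
-  For example (hypothetical invocation)::
-
-      pygmentize -f html -O outencoding=latin1 -o out.html input.py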
-
-- Made it possible to just drop style modules into the `styles`
-  subpackage of the Pygments installation.
-
-- Added a "state" keyword argument to the `using` helper.
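-
-  A minimal sketch (hypothetical mini-lexer) of delegating matched text
-  to another lexer starting in a chosen state::
-
-      from pygments.lexer import RegexLexer, bygroups, using
-      from pygments.lexers import PythonLexer
-      from pygments.token import Name, Text
-
-      class TaggedLexer(RegexLexer):
-          name = 'Tagged'  # hypothetical
-          tokens = {
-              'root': [
-                  (r'(py>)(.*)', bygroups(Name.Tag,
-                                          using(PythonLexer, state='root'))),
-                  (r'.+', Text),
-              ]
-          }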
-
-- Added a `commandprefix` option to the `LatexFormatter` which allows
-  you to control how the command names are constructed.
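-
-  For example (hypothetical usage)::
-
-      from pygments.formatters import LatexFormatter
-
-      # Generated commands then start with \Hl instead of the default prefix.
-      fmt = LatexFormatter(commandprefix='Hl')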
-
-- Added quite a few new lexers, thanks to Tim Hatch:
-
-  * Java Server Pages
-  * Windows batch files
-  * Trac Wiki markup
-  * Python tracebacks
-  * ReStructuredText
-  * Dylan
-  * and the Befunge esoteric programming language (yay!)
-
-- Added Mako lexers by Ben Bangert.
-
-- Added the "fruity" style, another dark-background theme originally
-  based on a vim colorscheme.
-
-- Added sources.list lexer by Dennis Kaarsemaker.
-
-- Added token stream filters, and a pygmentize option to use them.
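-
-  For example (hypothetical invocation, assuming the ``-F`` option and
-  the ``keywordcase`` filter)::
-
-      pygmentize -F keywordcase:case=upper -f html -o out.html input.py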
-
-- Changed the behavior of the `in` operator for tokens.
-
-- Added mimetypes for all lexers.
-
-- Fixed some problems lexing Python strings.
-
-- Fixed tickets: #167, #178, #179, #180, #185, #201.
-
-
-Version 0.6
------------
-(codename Zimtstern, released Dec 20, 2006)
-
-- Added option for the HTML formatter to write the CSS to an external
-  file in "full document" mode.
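-
-  A minimal sketch (hypothetical usage)::
-
-      from pygments.formatters import HtmlFormatter
-
-      # Full document, with styles written to the named CSS file.
-      fmt = HtmlFormatter(full=True, cssfile='pygments.css')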
-
-- Added RTF formatter.
-
-- Added Bash and Apache configuration lexers (thanks to Tim Hatch).
-
-- Improved guessing methods for various lexers.
-
-- Added `@media` support to CSS lexer (thanks to Tim Hatch).
-
-- Added a Groff lexer (thanks to Tim Hatch).
-
-- License change to BSD.
-
-- Added lexers for the Myghty template language.
-
-- Added a Scheme lexer (thanks to Marek Kubica).
-
-- Added some functions to iterate over existing lexers, formatters and
-  filters.
-
-- The HtmlFormatter's `get_style_defs()` can now take a list as an
-  argument to generate CSS with multiple prefixes.
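-
-  For example (hypothetical selectors)::
-
-      from pygments.formatters import HtmlFormatter
-
-      css = HtmlFormatter().get_style_defs(['.highlight', '.syntax'])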
-
-- Added support for guessing the input encoding.
-
-- Encoding support added: all processing is now done with Unicode
-  strings, input and output are converted from and optionally to byte
-  strings (see the ``encoding`` option of lexers and formatters).
-
-- Some improvements in the C(++) lexers handling comments and line
-  continuations.
-
-
-Version 0.5.1
--------------
-(released Oct 30, 2006)
-
-- Fixed traceback in ``pygmentize -L`` (thanks to Piotr Ozarowski).
-
-
-Version 0.5
------------
-(codename PyKleur, released Oct 30, 2006)
-
-- Initial public release.
+Pygments changelog
+==================
+
+Since 2.5.0, issue numbers refer to the tracker at
+<https://github.com/pygments/pygments/issues>,
+pull request numbers to the requests at
+<https://github.com/pygments/pygments/pulls>.
+
+
+Version 2.7.0
+-------------
+(released September 12, 2020)
+
+- Added lexers:
+
+  * Arrow (PR#1481, PR#1499)
+  * BARE (PR#1488)
+  * Devicetree (PR#1434)
+  * F* (PR#1409)
+  * GDScript (PR#1457)
+  * Pointless (PR#1494)
+  * PromQL (PR#1506)
+  * PsySH (PR#1438)
+  * Singularity (PR#1285)
+  * TiddlyWiki5 (PR#1390)
+  * TNT (PR#1414)
+  * YANG (PR#1408, PR#1428)
+
+- Updated lexers:
+
+  * APL (PR#1503)
+  * C++ (PR#1350, which also fixes: #1222, #996, #906, #828, #1162, #1166,
+    #1396)
+  * Chapel (PR#1423)
+  * CMake (#1491)
+  * CSound (#1509)
+  * Cython (PR#1507)
+  * Dart (PR#1449)
+  * Fennel (PR#1535)
+  * Fortran (PR#1442)
+  * GAS (PR#1530)
+  * HTTP (PR#1432, #1520, PR#1521)
+  * Inform 6 (PR#1461)
+  * Javascript (PR#1533)
+  * JSON (#1065, PR#1528)
+  * Lean (PR#1415)
+  * Matlab (PR#1399)
+  * Markdown (#1492, PR#1495)
+  * MySQL (#975, #1063, #1453, PR#1527)
+  * NASM (PR#1465)
+  * Nim (PR#1426)
+  * PostgreSQL (PR#1513)
+  * PowerShell (PR#1398, PR#1497)
+  * Protobuf (PR#1505)
+  * Robot (PR#1480)
+  * SQL (PR#1402)
+  * SystemVerilog (PR#1436, PR#1452, PR#1454, PR#1460, PR#1462, PR#1463, PR#1464, PR#1471, #1496, PR#1504)
+  * TeraTerm (PR#1337)
+  * XML (#1502)
+
+- Added a new filter for math symbols (PR#1406)
+- The Kconfig lexer will match Kconfig derivative names now (PR#1458)
+- Improved HTML formatter output (PR#1500)
+- ``.markdown`` is now recognized as an extension for Markdown files (PR#1476)
+- Fixed line number colors for Solarized (PR#1477, #1356)
+- Improvements to exception handling (PR#1478)
+- Improvements to tests (PR#1532, PR#1533, PR#1539)
+- Various code cleanups (PR#1536, PR#1537, PR#1538)
+
+
+Version 2.6.1
+-------------
+(released March 8, 2020)
+
+- This release fixes a packaging issue. No functional changes.
+
+
+Version 2.6
+-----------
+(released March 8, 2020)
+
+- Running Pygments on Python 2.x is no longer supported.
+  (The Python 2 lexer still exists.)
+
+- Added lexers:
+
+  * Linux kernel logs (PR#1310)
+  * LLVM MIR (PR#1361)
+  * MiniScript (PR#1397)
+  * Mosel (PR#1287, PR#1326)
+  * Parsing Expression Grammar (PR#1336)
+  * ReasonML (PR#1386)
+  * Ride (PR#1319, PR#1321)
+  * Sieve (PR#1257)
+  * USD (PR#1290)
+  * WebIDL (PR#1309)
+
+- Updated lexers:
+
+  * Apache2 (PR#1378)
+  * Chapel (PR#1357)
+  * CSound (PR#1383)
+  * D (PR#1375, PR#1362)
+  * Idris (PR#1360)
+  * Perl6/Raku lexer (PR#1344)
+  * Python3 (PR#1382, PR#1385)
+  * Rust: Updated lexer to cover more builtins (mostly macros) and miscellaneous
+    new syntax (PR#1320)
+  * SQL: Add temporal support keywords (PR#1402)
+
+- The 256-color/true-color terminal formatters now support the italic attribute
+  in styles (PR#1288)
+- Support HTTP 2/3 header (PR#1308)
+- Support missing reason in HTTP header (PR#1322)
+- Boogie/Silver: support line continuations and triggers, move contract keywords
+  to separate category (PR#1299)
+- GAS: support C-style comments (PR#1291)
+- Fix names in S lexer (PR#1330, PR#1333)
+- Fix numeric literals in Ada (PR#1334)
+- Recognize ``.mjs`` files as Javascript (PR#1392)
+- Recognize ``.eex`` files as Elixir (PR#1387)
+- Fix ``re.MULTILINE`` usage (PR#1388)
+- Recognize ``pipenv`` and ``poetry`` dependency & lock files (PR#1376)
+- Improve font search on Windows (#1247)
+- Remove unused script block (#1401)
+
+
+Version 2.5.2
+-------------
+(released November 29, 2019)
+
+- Fix incompatibility with some setuptools versions (PR#1316)
+
+- Fix lexing of ReST field lists (PR#1279)
+- Fix lexing of Matlab keywords as field names (PR#1282)
+- Recognize double-quoted strings in Matlab (PR#1278)
+- Avoid slow backtracking in Vim lexer (PR#1312)
+- Fix Scala highlighting of types (PR#1315)
+- Add some Terraform keywords
+- Update Modelica lexer to 3.4
+- Update Crystal examples
+
+
+Version 2.5.1
+-------------
+(released November 26, 2019)
+
+- This release fixes a packaging issue. No functional changes.
+
+
+Version 2.5.0
+-------------
+(released November 26, 2019)
+
+- Added lexers:
+
+  * Email (PR#1246)
+  * Erlang, Elixir shells (PR#823, #1521)
+  * Notmuch (PR#1264)
+  * Scdoc (PR#1268)
+  * Solidity (#1214)
+  * Zeek (new name for Bro) (PR#1269)
+  * Zig (PR#820)
+
+- Updated lexers:
+
+  * Apache2 Configuration (PR#1251)
+  * Bash sessions (#1253)
+  * CSound (PR#1250)
+  * Dart
+  * Dockerfile
+  * Emacs Lisp
+  * Handlebars (PR#773)
+  * Java (#1101, #987)
+  * Logtalk (PR#1261)
+  * Matlab (PR#1271)
+  * Praat (PR#1277)
+  * Python3 (PR#1255, PR#1400)
+  * Ruby
+  * YAML (#1528)
+  * Velocity
+
+- Added styles:
+
+  * Inkpot (PR#1276)
+
+- The ``PythonLexer`` class is now an alias for the former ``Python3Lexer``.
+  The old ``PythonLexer`` is available as ``Python2Lexer``.  Same change has
+  been done for the ``PythonTracebackLexer``.  The ``python3`` option for
+  the ``PythonConsoleLexer`` is now true by default.
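+
+  A minimal sketch of the new default (assuming a Pygments 2.5+
+  install)::
+
+    from pygments import highlight
+    from pygments.lexers import get_lexer_by_name
+    from pygments.formatters import TerminalFormatter
+
+    # "python" now resolves to the Python 3 lexer; the old behaviour
+    # is still available under the "python2" alias.
+    lexer = get_lexer_by_name("python2")
+    print(highlight("print 'hi'", lexer, TerminalFormatter()))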
+
+- Bump ``NasmLexer`` priority over ``TasmLexer`` for ``.asm`` files
+  (fixes #1326)
+- Default font in the ``ImageFormatter`` has been updated (#928, PR#1245)
+- Test suite switched to py.test, removed nose dependency (#1490)
+- Reduce ``TeraTerm`` lexer score -- it used to match nearly all languages
+  (#1256)
+- Treat ``Skylark``/``Starlark`` files as Python files (PR#1259)
+- Image formatter: actually respect ``line_number_separator`` option
+
+- Add LICENSE file to wheel builds
+- Agda: fix lambda highlighting
+- Dart: support ``@`` annotations
+- Dockerfile: accept ``FROM ... AS`` syntax
+- Emacs Lisp: add more string functions
+- GAS: accept registers in directive arguments
+- Java: make structural punctuation (braces, parens, colon, comma) ``Punctuation``, not ``Operator`` (#987)
+- Java: support ``var`` contextual keyword (#1101)
+- Matlab: Fix recognition of ``function`` keyword (PR#1271)
+- Python: recognize ``.jy`` filenames (#976)
+- Python: recognize ``f`` string prefix (#1156)
+- Ruby: support squiggly heredocs
+- Shell sessions: recognize Virtualenv prompt (PR#1266)
+- Velocity: support silent reference syntax
+
+
+Version 2.4.2
+-------------
+(released May 28, 2019)
+
+- Fix encoding error when guessing lexer with given ``encoding`` option
+  (#1438)
+
+
+Version 2.4.1
+-------------
+(released May 24, 2019)
+
+- Updated lexers:
+
+  * Coq (#1430)
+  * MSDOS Session (PR#734)
+  * NASM (#1517)
+  * Objective-C (PR#813, #1508)
+  * Prolog (#1511)
+  * TypeScript (#1515)
+
+- Support CSS variables in stylesheets (PR#814, #1356)
+- Fix F# lexer name (PR#709)
+- Fix ``TerminalFormatter`` using bold for bright text (#1480)
+
+
+Version 2.4.0
+-------------
+(released May 8, 2019)
+
+- Added lexers:
+
+  * Augeas (PR#807)
+  * BBC Basic (PR#806)
+  * Boa (PR#756)
+  * Charm++ CI (PR#788)
+  * DASM16 (PR#807)
+  * FloScript (PR#750)
+  * FreeFem++ (PR#785)
+  * Hspec (PR#790)
+  * Pony (PR#627)
+  * SGF (PR#780)
+  * Slash (PR#807)
+  * Slurm (PR#760)
+  * Tera Term Language (PR#749)
+  * TOML (PR#807)
+  * Unicon (PR#731)
+  * VBScript (PR#673)
+
+- Updated lexers:
+
+  * Apache2 (PR#766)
+  * Cypher (PR#746)
+  * LLVM (PR#792)
+  * Makefiles (PR#766)
+  * PHP (#1482)
+  * Rust
+  * SQL (PR#672)
+  * Stan (PR#774)
+  * Stata (PR#800)
+  * Terraform (PR#787)
+  * YAML
+
+- Add solarized style (PR#708)
+- Add support for Markdown reference-style links (PR#753)
+- Add license information to generated HTML/CSS files (#1496)
+- Change ANSI color names (PR#777)
+- Fix catastrophic backtracking in the bash lexer (#1494)
+- Fix documentation failing to build using Sphinx 2.0 (#1501)
+- Fix incorrect links in the Lisp and R lexer documentation (PR#775)
+- Fix rare unicode errors on Python 2.7 (PR#798, #1492)
+- Fix lexers popping from an empty stack (#1506)
+- TypoScript uses ``.typoscript`` now (#1498)
+- Updated Trove classifiers and ``pip`` requirements (PR#799)
+
+
+Version 2.3.1
+-------------
+(released Dec 16, 2018)
+
+- Updated lexers:
+
+  * ASM (PR#784)
+  * Chapel (PR#735)
+  * Clean (PR#621)
+  * CSound (PR#684)
+  * Elm (PR#744)
+  * Fortran (PR#747)
+  * GLSL (PR#740)
+  * Haskell (PR#745)
+  * Hy (PR#754)
+  * Igor Pro (PR#764)
+  * PowerShell (PR#705)
+  * Python (PR#720, #1299, PR#715)
+  * SLexer (PR#680)
+  * YAML (PR#762, PR#724)
+
+- Fix invalid string escape sequences
+- Fix `FutureWarning` introduced by regex changes in Python 3.7
+
+
+Version 2.3.0
+-------------
+(released Nov 25, 2018)
+
+- Added lexers:
+
+  * Fennel (PR#783)
+  * HLSL (PR#675)
+
+- Updated lexers:
+
+  * Dockerfile (PR#714)
+
+- Minimum Python versions changed to 2.7 and 3.5
+- Added support for Python 3.7 generator changes (PR#772)
+- Fix incorrect token type in SCSS for single-quote strings (#1322)
+- Use `terminal256` formatter if `TERM` contains `256` (PR#666)
+- Fix incorrect handling of GitHub style fences in Markdown (PR#741, #1389)
+- Fix `%a` not being highlighted in Python3 strings (PR#727)
+
+
+Version 2.2.0
+-------------
+(released Jan 22, 2017)
+
+- Added lexers:
+
+  * AMPL
+  * TypoScript (#1173)
+  * Varnish config (PR#554)
+  * Clean (PR#503)
+  * WDiff (PR#513)
+  * Flatline (PR#551)
+  * Silver (PR#537)
+  * HSAIL (PR#518)
+  * JSGF (PR#546)
+  * NCAR command language (PR#536)
+  * Extempore (PR#530)
+  * Cap'n Proto (PR#595)
+  * Whiley (PR#573)
+  * Monte (PR#592)
+  * Crystal (PR#576)
+  * Snowball (PR#589)
+  * CapDL (PR#579)
+  * NuSMV (PR#564)
+  * SAS, Stata (PR#593)
+
+- Added the ability to load lexer and formatter classes directly from files
+  with the `-x` command line option and the `lexers.load_lexer_from_file()`
+  and `formatters.load_formatter_from_file()` functions. (PR#559)
+
+- Added `lexers.find_lexer_class_by_name()`. (#1203)
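+
+  A minimal sketch of both new helpers; the ``mylexer.py`` file and its
+  ``CustomLexer`` class are hypothetical, for illustration only::
+
+    from pygments.lexers import load_lexer_from_file, find_lexer_class_by_name
+
+    # Load a lexer class straight from a file on disk; by default a
+    # class named "CustomLexer" is looked up in that file.
+    lexer = load_lexer_from_file("mylexer.py")
+
+    # Look up a registered lexer class (not an instance) by name.
+    cls = find_lexer_class_by_name("python")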
+
+- Added new token types and lexing for magic methods and variables in Python
+  and PHP.
+
+- Added a new token type for string affixes and lexing for them in Python, C++
+  and Postgresql lexers.
+
+- Added a new token type for heredoc (and similar) string delimiters and
+  lexing for them in C++, Perl, PHP, Postgresql and Ruby lexers.
+
+- Styles can now define colors with ANSI colors for use in the 256-color
+  terminal formatter. (PR#531)
+
+- Improved the CSS lexer. (#1083, #1130)
+
+- Added "Rainbow Dash" style. (PR#623)
+
+- Delay loading `pkg_resources`, which takes a long while to import. (PR#690)
+
+
+Version 2.1.3
+-------------
+(released Mar 2, 2016)
+
+- Fixed regression in Bash lexer (PR#563)
+
+
+Version 2.1.2
+-------------
+(released Feb 29, 2016)
+
+- Fixed Python 3 regression in image formatter (#1215)
+- Fixed regression in Bash lexer (PR#562)
+
+
+Version 2.1.1
+-------------
+(released Feb 14, 2016)
+
+- Fixed Jython compatibility (#1205)
+- Fixed HTML formatter output with leading empty lines (#1111)
+- Added a mapping table for LaTeX encodings and added utf8 (#1152)
+- Fixed image formatter font searching on Macs (#1188)
+- Fixed deepcopy-ing of Token instances (#1168)
+- Fixed Julia string interpolation (#1170)
+- Fixed statefulness of HttpLexer between get_tokens calls
+- Many smaller fixes to various lexers
+
+
+Version 2.1
+-----------
+(released Jan 17, 2016)
+
+- Added lexers:
+
+  * Emacs Lisp (PR#431)
+  * Arduino (PR#442)
+  * Modula-2 with multi-dialect support (#1090)
+  * Fortran fixed format (PR#213)
+  * Archetype Definition language (PR#483)
+  * Terraform (PR#432)
+  * Jcl, Easytrieve (PR#208)
+  * ParaSail (PR#381)
+  * Boogie (PR#420)
+  * Turtle (PR#425)
+  * Fish Shell (PR#422)
+  * Roboconf (PR#449)
+  * Test Anything Protocol (PR#428)
+  * Shen (PR#385)
+  * Component Pascal (PR#437)
+  * SuperCollider (PR#472)
+  * Shell consoles (Tcsh, PowerShell, MSDOS) (PR#479)
+  * Elm and J (PR#452)
+  * Crmsh (PR#440)
+  * Praat (PR#492)
+  * CSound (PR#494)
+  * Ezhil (PR#443)
+  * Thrift (PR#469)
+  * QVT Operational (PR#204)
+  * Hexdump (PR#508)
+  * CAmkES Configuration (PR#462)
+
+- Added styles:
+
+  * Lovelace (PR#456)
+  * Algol and Algol-nu (#1090)
+
+- Added formatters:
+
+  * IRC (PR#458)
+  * True color (24-bit) terminal ANSI sequences (#1142)
+    (formatter alias: "16m")
+
+- New "filename" option for HTML formatter (PR#527).
+
+- Improved performance of the HTML formatter for long lines (PR#504).
+
+- Updated autopygmentize script (PR#445).
+
+- Fixed style inheritance for non-standard token types in HTML output.
+
+- Added support for async/await to Python 3 lexer.
+
+- Rewrote linenos option for TerminalFormatter (it's better, but slightly
+  different output than before) (#1147).
+
+- Javascript lexer now supports most of ES6 (#1100).
+
+- Cocoa builtins updated for iOS 8.1 (PR#433).
+
+- Combined BashSessionLexer and ShellSessionLexer; the new version should
+  support the prompt styles of either.
+
+- Added option to pygmentize to show a full traceback on exceptions.
+
+- Fixed incomplete output on Windows and Python 3 (e.g. when using IPython
+  Notebook) (#1153).
+
+- Allowed more traceback styles in Python console lexer (PR#253).
+
+- Added decorators to TypeScript (PR#509).
+
+- Fix highlighting of certain IRC logs formats (#1076).
+
+
+Version 2.0.2
+-------------
+(released Jan 20, 2015)
+
+- Fix Python tracebacks getting duplicated in the console lexer (#1068).
+
+- Backquote-delimited identifiers are now recognized in F# (#1062).
+
+
+Version 2.0.1
+-------------
+(released Nov 10, 2014)
+
+- Fix an encoding issue when using ``pygmentize`` with the ``-o`` option.
+
+
+Version 2.0
+-----------
+(released Nov 9, 2014)
+
+- Default lexer encoding is now "guess", i.e. UTF-8 / Locale / Latin1 is
+  tried in that order.
+
+- Major update to Swift lexer (PR#410).
+
+- Multiple fixes to lexer guessing in conflicting cases:
+
+  * recognize HTML5 by doctype
+  * recognize XML by XML declaration
+  * don't recognize C/C++ as SystemVerilog
+
+- Simplified regexes and builtin lists.
+
+
+Version 2.0rc1
+--------------
+(released Oct 16, 2014)
+
+- Dropped Python 2.4 and 2.5 compatibility.  This is in favor of single-source
+  compatibility between Python 2.6, 2.7 and 3.3+.
+
+- New website and documentation based on Sphinx (finally!)
+
+- Lexers added:
+
+  * APL (#969)
+  * Agda and Literate Agda (PR#203)
+  * Alloy (PR#355)
+  * AmbientTalk
+  * BlitzBasic (PR#197)
+  * ChaiScript (PR#24)
+  * Chapel (PR#256)
+  * Cirru (PR#275)
+  * Clay (PR#184)
+  * ColdFusion CFC (PR#283)
+  * Cryptol and Literate Cryptol (PR#344)
+  * Cypher (PR#257)
+  * Docker config files
+  * EBNF (PR#193)
+  * Eiffel (PR#273)
+  * GAP (PR#311)
+  * Golo (PR#309)
+  * Handlebars (PR#186)
+  * Hy (PR#238)
+  * Idris and Literate Idris (PR#210)
+  * Igor Pro (PR#172)
+  * Inform 6/7 (PR#281)
+  * Intel objdump (PR#279)
+  * Isabelle (PR#386)
+  * Jasmin (PR#349)
+  * JSON-LD (PR#289)
+  * Kal (PR#233)
+  * Lean (PR#399)
+  * LSL (PR#296)
+  * Limbo (PR#291)
+  * Liquid (#977)
+  * MQL (PR#285)
+  * MaskJS (PR#280)
+  * Mozilla preprocessors
+  * Mathematica (PR#245)
+  * NesC (PR#166)
+  * Nit (PR#375)
+  * Nix (PR#267)
+  * Pan
+  * Pawn (PR#211)
+  * Perl 6 (PR#181)
+  * Pig (PR#304)
+  * Pike (PR#237)
+  * QBasic (PR#182)
+  * Red (PR#341)
+  * ResourceBundle (#1038)
+  * Rexx (PR#199)
+  * Rql (PR#251)
+  * Rsl
+  * SPARQL (PR#78)
+  * Slim (PR#366)
+  * Swift (PR#371)
+  * Swig (PR#168)
+  * TADS 3 (PR#407)
+  * Todo.txt todo lists
+  * Twig (PR#404)
+
+- Added a helper to "optimize" regular expressions that match one of many
+  literal words; this can save 20% and more lexing time with lexers that
+  highlight many keywords or builtins.
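+
+  A minimal sketch of the helper (presumably ``words()`` from
+  ``pygments.lexer``) as used in a hypothetical lexer definition::
+
+    from pygments.lexer import RegexLexer, words
+    from pygments.token import Keyword, Name, Text
+
+    class TinyLexer(RegexLexer):
+        name = 'Tiny'
+        tokens = {
+            'root': [
+                # words() compiles the alternatives into one optimized regex
+                (words(('if', 'else', 'while'), suffix=r'\b'), Keyword),
+                (r'\w+', Name),
+                (r'\s+', Text),
+            ],
+        }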
+
+- New styles: "xcode" and "igor", similar to the default highlighting of
+  the respective IDEs.
+
+- The command-line "pygmentize" tool now tries a little harder to find the
+  correct encoding for files and the terminal (#979).
+
+- Added "inencoding" option for lexers to override "encoding" analogous
+  to "outencoding" (#800).
+
+- Added line-by-line "streaming" mode for pygmentize with the "-s" option
+  (PR#165).  This only fully works for lexers that have no constructs
+  spanning lines!
+
+- Added an "envname" option to the LaTeX formatter to select a replacement
+  verbatim environment (PR#235).
+
+- Updated the Makefile lexer to yield a little more useful highlighting.
+
+- Lexer aliases passed to ``get_lexer_by_name()`` are now case-insensitive.
+
+- File name matching in lexers and formatters will now use a regex cache
+  for speed (PR#205).
+
+- Pygments will now recognize "vim" modelines when guessing the lexer for
+  a file based on content (PR#118).
+
+- Major restructure of the ``pygments.lexers`` module namespace.  There are now
+  many more modules with fewer lexers per module.  Old modules are still around
+  and re-export the lexers they previously contained.
+
+- The NameHighlightFilter now works with any Name.* token type (#790).
+
+- Python 3 lexer: add new exceptions from PEP 3151.
+
+- Opa lexer: add new keywords (PR#170).
+
+- Julia lexer: add keywords and underscore-separated number
+  literals (PR#176).
+
+- Lasso lexer: fix method highlighting, update builtins. Fix
+  guessing so that plain XML isn't always taken as Lasso (PR#163).
+
+- Objective C/C++ lexers: allow "@" prefixing any expression (#871).
+
+- Ruby lexer: fix lexing of Name::Space tokens (#860) and of symbols
+  in hashes (#873).
+
+- Stan lexer: update for version 2.4.0 of the language (PR#162, PR#255, PR#377).
+
+- JavaScript lexer: add the "yield" keyword (PR#196).
+
+- HTTP lexer: support for PATCH method (PR#190).
+
+- Koka lexer: update to newest language spec (PR#201).
+
+- Haxe lexer: rewrite and support for Haxe 3 (PR#174).
+
+- Prolog lexer: add different kinds of numeric literals (#864).
+
+- F# lexer: rewrite with newest spec for F# 3.0 (#842), fix a bug with
+  dotted chains (#948).
+
+- Kotlin lexer: general update (PR#271).
+
+- Rebol lexer: fix comment detection and analyse_text (PR#261).
+
+- LLVM lexer: update keywords to v3.4 (PR#258).
+
+- PHP lexer: add new keywords and binary literals (PR#222).
+
+- external/markdown-processor.py updated to newest python-markdown (PR#221).
+
+- CSS lexer: some highlighting order fixes (PR#231).
+
+- Ceylon lexer: fix parsing of nested multiline comments (#915).
+
+- C family lexers: fix parsing of indented preprocessor directives (#944).
+
+- Rust lexer: update to 0.9 language version (PR#270, PR#388).
+
+- Elixir lexer: update to 0.15 language version (PR#392).
+
+- Fix swallowing incomplete tracebacks in Python console lexer (#874).
+
+
+Version 1.6
+-----------
+(released Feb 3, 2013)
+
+- Lexers added:
+
+  * Dylan console (PR#149)
+  * Logos (PR#150)
+  * Shell sessions (PR#158)
+
+- Fix guessed lexers not receiving lexer options (#838).
+
+- Fix unquoted HTML attribute lexing in Opa (#841).
+
+- Fixes to the Dart lexer (PR#160).
+
+
+Version 1.6rc1
+--------------
+(released Jan 9, 2013)
+
+- Lexers added:
+
+  * AspectJ (PR#90)
+  * AutoIt (PR#122)
+  * BUGS-like languages (PR#89)
+  * Ceylon (PR#86)
+  * Croc (new name for MiniD)
+  * CUDA (PR#75)
+  * Dg (PR#116)
+  * IDL (PR#115)
+  * Jags (PR#89)
+  * Julia (PR#61)
+  * Kconfig (#711)
+  * Lasso (PR#95, PR#113)
+  * LiveScript (PR#84)
+  * Monkey (PR#117)
+  * Mscgen (PR#80)
+  * NSIS scripts (PR#136)
+  * OpenCOBOL (PR#72)
+  * QML (PR#123)
+  * Puppet (PR#133)
+  * Racket (PR#94)
+  * Rdoc (PR#99)
+  * Robot Framework (PR#137)
+  * RPM spec files (PR#124)
+  * Rust (PR#67)
+  * Smali (Dalvik assembly)
+  * SourcePawn (PR#39)
+  * Stan (PR#89)
+  * Treetop (PR#125)
+  * TypeScript (PR#114)
+  * VGL (PR#12)
+  * Visual FoxPro (#762)
+  * Windows Registry (#819)
+  * Xtend (PR#68)
+
+- The HTML formatter now supports linking to tags using CTags files, when the
+  python-ctags package is installed (PR#87).
+
+- The HTML formatter now has a "linespans" option that wraps every line in a
+   tag with a specific id (PR#82).
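+
+  For instance (a minimal sketch)::
+
+    from pygments import highlight
+    from pygments.lexers import PythonLexer
+    from pygments.formatters import HtmlFormatter
+
+    # each output line is wrapped in <span id="mine-1">, <span id="mine-2">, ...
+    html = highlight("print(1)\nprint(2)\n", PythonLexer(),
+                     HtmlFormatter(linespans="mine"))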
+
+- When deriving a lexer from another lexer with token definitions, definitions
+  for states not in the child lexer are now inherited.  If you override a state
+  in the child lexer, an "inherit" keyword has been added to insert the base
+  state at that position (PR#141).
+
+- The C family lexers now inherit token definitions from a common base class,
+  removing code duplication (PR#141).
+
+- Use "colorama" on Windows for console color output (PR#142).
+
+- Fix Template Haskell highlighting (PR#63).
+
+- Fix some S/R lexer errors (PR#91).
+
+- Fix a bug in the Prolog lexer with names that start with 'is' (#810).
+
+- Rewrite Dylan lexer, add Dylan LID lexer (PR#147).
+
+- Add a Java quickstart document (PR#146).
+
+- Add a "external/autopygmentize" file that can be used as .lessfilter (#802).
+
+
+Version 1.5
+-----------
+(codename Zeitdilatation, released Mar 10, 2012)
+
+- Lexers added:
+
+  * Awk (#630)
+  * Fancy (#633)
+  * PyPy Log
+  * eC
+  * Nimrod
+  * Nemerle (#667)
+  * F# (#353)
+  * Groovy (#501)
+  * PostgreSQL (#660)
+  * DTD
+  * Gosu (#634)
+  * Octave (PR#22)
+  * Standard ML (PR#14)
+  * CFengine3 (#601)
+  * Opa (PR#37)
+  * HTTP sessions (PR#42)
+  * JSON (PR#31)
+  * SNOBOL (PR#30)
+  * MoonScript (PR#43)
+  * ECL (PR#29)
+  * Urbiscript (PR#17)
+  * OpenEdge ABL (PR#27)
+  * SystemVerilog (PR#35)
+  * Coq (#734)
+  * PowerShell (#654)
+  * Dart (#715)
+  * Fantom (PR#36)
+  * Bro (PR#5)
+  * NewLISP (PR#26)
+  * VHDL (PR#45)
+  * Scilab (#740)
+  * Elixir (PR#57)
+  * Tea (PR#56)
+  * Kotlin (PR#58)
+
+- Fix Python 3 terminal highlighting with pygmentize (#691).
+
+- In the LaTeX formatter, escape special &, < and > chars (#648).
+
+- In the LaTeX formatter, fix display problems for styles with token
+  background colors (#670).
+
+- Enhancements to the Squid conf lexer (#664).
+
+- Several fixes to the reStructuredText lexer (#636).
+
+- Recognize methods in the ObjC lexer (#638).
+
+- Fix Lua "class" highlighting: it does not have classes (#665).
+
+- Fix degenerate regex in Scala lexer (#671) and highlighting bugs (#713, #708).
+
+- Fix number pattern order in OCaml lexer (#647).
+
+- Fix generic type highlighting in ActionScript 3 (#666).
+
+- Fixes to the Clojure lexer (PR#9).
+
+- Fix degenerate regex in Nemerle lexer (#706).
+
+- Fix infinite looping in CoffeeScript lexer (#729).
+
+- Fix crashes and analysis with ObjectiveC lexer (#693, #696).
+
+- Add some Fortran 2003 keywords.
+
+- Fix Boo string regexes (#679).
+
+- Add "rrt" style (#727).
+
+- Fix infinite looping in Darcs Patch lexer.
+
+- Lots of misc fixes to character-eating bugs and ordering problems in many
+  different lexers.
+
+
+Version 1.4
+-----------
+(codename Unschärfe, released Jan 03, 2011)
+
+- Lexers added:
+
+  * Factor (#520)
+  * PostScript (#486)
+  * Verilog (#491)
+  * BlitzMax Basic (#478)
+  * Ioke (#465)
+  * Java properties, split out of the INI lexer (#445)
+  * Scss (#509)
+  * Duel/JBST
+  * XQuery (#617)
+  * Mason (#615)
+  * GoodData (#609)
+  * SSP (#473)
+  * Autohotkey (#417)
+  * Google Protocol Buffers
+  * Hybris (#506)
+
+- Do not fail in analyse_text methods (#618).
+
+- Performance improvements in the HTML formatter (#523).
+
+- With the ``noclasses`` option in the HTML formatter, some styles
+  present in the stylesheet were not added as inline styles.
+
+- Four fixes to the Lua lexer (#480, #481, #482, #497).
+
+- More context-sensitive Gherkin lexer with support for more i18n translations.
+
+- Support new OO keywords in Matlab lexer (#521).
+
+- Small fix in the CoffeeScript lexer (#519).
+
+- A bugfix for backslashes in OCaml strings (#499).
+
+- Fix unicode/raw docstrings in the Python lexer (#489).
+
+- Allow PIL to work without PIL.pth (#502).
+
+- Allow seconds as a unit in CSS (#496).
+
+- Support ``application/javascript`` as a JavaScript mime type (#504).
+
+- Support Offload C++ Extensions as keywords in the C++ lexer (#484).
+
+- Escape more characters in LaTeX output (#505).
+
+- Update Haml/Sass lexers to version 3 (#509).
+
+- Small PHP lexer string escaping fix (#515).
+
+- Support comments before preprocessor directives, and unsigned/
+  long long literals in C/C++ (#613, #616).
+
+- Support line continuations in the INI lexer (#494).
+
+- Fix lexing of Dylan string and char literals (#628).
+
+- Fix class/procedure name highlighting in VB.NET lexer (#624).
+
+
+Version 1.3.1
+-------------
+(bugfix release, released Mar 05, 2010)
+
+- The ``pygmentize`` script was missing from the distribution.
+
+
+Version 1.3
+-----------
+(codename Schneeglöckchen, released Mar 01, 2010)
+
+- Added the ``ensurenl`` lexer option, which can be used to suppress the
+  automatic addition of a newline to the lexer input.
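+
+  For instance (a minimal sketch)::
+
+    from pygments.lexers import get_lexer_by_name
+
+    # by default a trailing newline is appended to the input; opt out:
+    lexer = get_lexer_by_name("python", ensurenl=False)
+    tokens = list(lexer.get_tokens("x = 1"))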
+
+- Lexers added:
+
+  * Ada
+  * Coldfusion
+  * Modula-2
+  * Haxe
+  * R console
+  * Objective-J
+  * Haml and Sass
+  * CoffeeScript
+
+- Enhanced reStructuredText highlighting.
+
+- Added support for PHP 5.3 namespaces in the PHP lexer.
+
+- Added a bash completion script for `pygmentize` to the external/
+  directory (#466).
+
+- Fixed a bug in `do_insertions()` used for multi-lexer languages.
+
+- Fixed a Ruby regex highlighting bug (#476).
+
+- Fixed regex highlighting bugs in Perl lexer (#258).
+
+- Add small enhancements to the C lexer (#467) and Bash lexer (#469).
+
+- Small fixes for the Tcl, Debian control file, Nginx config,
+  Smalltalk, Objective-C, Clojure, Lua lexers.
+
+- Gherkin lexer: Fixed single apostrophe bug and added new i18n keywords.
+
+
+Version 1.2.2
+-------------
+(bugfix release, released Jan 02, 2010)
+
+* Removed a backwards incompatibility in the LaTeX formatter that caused
+  Sphinx to produce invalid commands when writing LaTeX output (#463).
+
+* Fixed a forever-backtracking regex in the BashLexer (#462).
+
+
+Version 1.2.1
+-------------
+(bugfix release, released Jan 02, 2010)
+
+* Fixed mishandling of an ellipsis in place of the frames in a Python
+  console traceback, resulting in clobbered output.
+
+
+Version 1.2
+-----------
+(codename Neujahr, released Jan 01, 2010)
+
+- Dropped Python 2.3 compatibility.
+
+- Lexers added:
+
+  * Asymptote
+  * Go
+  * Gherkin (Cucumber)
+  * CMake
+  * Ooc
+  * Coldfusion
+  * Haxe
+  * R console
+
+- Added options for rendering LaTeX in source code comments in the
+  LaTeX formatter (#461).
+
+- Updated the Logtalk lexer.
+
+- Added `line_number_start` option to image formatter (#456).
+
+- Added `hl_lines` and `hl_color` options to image formatter (#457).
+
+- Fixed the HtmlFormatter's handling of noclasses=True to not output any
+  classes (#427).
+
+- Added the Monokai style (#453).
+
+- Fixed LLVM lexer identifier syntax and added new keywords (#442).
+
+- Fixed the PythonTracebackLexer to handle non-traceback data in header or
+  trailer, and support more partial tracebacks that start on line 2 (#437).
+
+- Fixed the CLexer to not highlight ternary statements as labels.
+
+- Fixed lexing of some Ruby quoting peculiarities (#460).
+
+- A few ASM lexer fixes (#450).
+
+
+Version 1.1.1
+-------------
+(bugfix release, released Sep 15, 2009)
+
+- Fixed the BBCode lexer (#435).
+
+- Added support for new Jinja2 keywords.
+
+- Fixed test suite failures.
+
+- Added Gentoo-specific suffixes to Bash lexer.
+
+
+Version 1.1
+-----------
+(codename Brillouin, released Sep 11, 2009)
+
+- Ported Pygments to Python 3.  This needed a few changes in the way
+  encodings are handled; they may affect corner cases when used with
+  Python 2 as well.
+
+- Lexers added:
+
+  * Antlr/Ragel, thanks to Ana Nelson
+  * (Ba)sh shell
+  * Erlang shell
+  * GLSL
+  * Prolog
+  * Evoque
+  * Modelica
+  * Rebol
+  * MXML
+  * Cython
+  * ABAP
+  * ASP.net (VB/C#)
+  * Vala
+  * Newspeak
+
+- Fixed the LaTeX formatter's output so that output generated for one style
+  can be used with the style definitions of another (#384).
+
+- Added "anchorlinenos" and "noclobber_cssfile" (#396) options to HTML
+  formatter.
+
+- Support multiline strings in Lua lexer.
+
+- Rewrite of the JavaScript lexer by Pumbaa80 to better support regular
+  expression literals (#403).
+
+- When pygmentize is asked to highlight a file for which multiple lexers
+  match the filename, use the analyse_text guessing engine to determine the
+  winner (#355).
+
+- Fixed minor bugs in the JavaScript lexer (#383), the Matlab lexer (#378),
+  the Scala lexer (#392), the INI lexer (#391), the Clojure lexer (#387)
+  and the AS3 lexer (#389).
+
+- Fixed three Perl heredoc lexing bugs (#379, #400, #422).
+
+- Fixed a bug in the image formatter which misdetected lines (#380).
+
+- Fixed bugs lexing extended Ruby strings and regexes.
+
+- Fixed a bug when lexing git diffs.
+
+- Fixed a bug lexing the empty commit in the PHP lexer (#405).
+
+- Fixed a bug causing Python numbers to be mishighlighted as floats (#397).
+
+- Fixed a bug when backslashes are used in odd locations in Python (#395).
+
+- Fixed various bugs in Matlab and S-Plus lexers, thanks to Winston Chang (#410,
+  #411, #413, #414) and fmarc (#419).
+
+- Fixed a bug in Haskell single-line comment detection (#426).
+
+- Added new-style reStructuredText directive for docutils 0.5+ (#428).
+
+
+Version 1.0
+-----------
+(codename Dreiundzwanzig, released Nov 23, 2008)
+
+- Don't use join(splitlines()) when converting newlines to ``\n``,
+  because that doesn't keep all newlines at the end when the
+  ``stripnl`` lexer option is False.
+
+- Added ``-N`` option to command-line interface to get a lexer name
+  for a given filename.
+
+- Added Tango style, written by Andre Roberge for the Crunchy project.
+
+- Added Python3TracebackLexer and ``python3`` option to
+  PythonConsoleLexer.
+
+- Fixed a few bugs in the Haskell lexer.
+
+- Fixed PythonTracebackLexer to be able to recognize SyntaxError and
+  KeyboardInterrupt (#360).
+
+- Provide one formatter class per image format, so that surprises like::
+
+    pygmentize -f gif -o foo.gif foo.py
+
+  creating a PNG file are avoided.
+
+- Actually use the `font_size` option of the image formatter.
+
+- Fixed numpy lexer so that it no longer listens for `*.py`.
+
+- Fixed HTML formatter so that text options can be Unicode
+  strings (#371).
+
+- Unified Diff lexer supports the "udiff" alias now.
+
+- Fixed a few issues in Scala lexer (#367).
+
+- RubyConsoleLexer now supports simple prompt mode (#363).
+
+- JavascriptLexer is smarter about what constitutes a regex (#356).
+
+- Add Applescript lexer, thanks to Andreas Amann (#330).
+
+- Make the codetags more strict about matching words (#368).
+
+- NginxConfLexer is a little more accurate on mimetypes and
+  variables (#370).
+
+
+Version 0.11.1
+--------------
+(released Aug 24, 2008)
+
+- Fixed a Jython compatibility issue in pygments.unistring (#358).
+
+
+Version 0.11
+------------
+(codename Straußenei, released Aug 23, 2008)
+
+Many thanks go to Tim Hatch for writing or integrating most of the bug
+fixes and new features.
+
+- Lexers added:
+
+  * Nasm-style assembly language, thanks to delroth
+  * YAML, thanks to Kirill Simonov
+  * ActionScript 3, thanks to Pierre Bourdon
+  * Cheetah/Spitfire templates, thanks to Matt Good
+  * Lighttpd config files
+  * Nginx config files
+  * Gnuplot plotting scripts
+  * Clojure
+  * POV-Ray scene files
+  * Sqlite3 interactive console sessions
+  * Scala source files, thanks to Krzysiek Goj
+
+- Lexers improved:
+
+  * C lexer highlights standard library functions now and supports C99
+    types.
+  * Bash lexer now correctly highlights heredocs without preceding
+    whitespace.
+  * Vim lexer now highlights hex colors properly and knows a couple
+    more keywords.
+  * Irc logs lexer now handles xchat's default time format (#340) and
+    correctly highlights lines ending in ``>``.
+  * Support more delimiters for perl regular expressions (#258).
+  * ObjectiveC lexer now supports 2.0 features.
+
+- Added "Visual Studio" style.
+
+- Updated markdown processor to Markdown 1.7.
+
+- Support roman/sans/mono style defs and use them in the LaTeX
+  formatter.
+
+- The RawTokenFormatter is no longer registered to ``*.raw`` and it's
+  documented that tokenization with this lexer may raise exceptions.
+
+- New option ``hl_lines`` to HTML formatter, to highlight certain
+  lines.
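+
+  For instance (a minimal sketch)::
+
+    from pygments import highlight
+    from pygments.lexers import PythonLexer
+    from pygments.formatters import HtmlFormatter
+
+    # lines 1 and 3 of the output get the "hll" highlight class
+    html = highlight("a = 1\nb = 2\nc = 3\n", PythonLexer(),
+                     HtmlFormatter(hl_lines=[1, 3]))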
+
+- New option ``prestyles`` to HTML formatter.
+
+- New option *-g* to pygmentize, to allow lexer guessing based on
+  filetext (can be slowish, so file extensions are still checked
+  first).
+
+- ``guess_lexer()`` now makes its decision much faster due to a cache
+  of whether data is xml-like (a check which is used in several
+  versions of ``analyse_text()``).  Several lexers also have more
+  accurate ``analyse_text()`` now.
+
+
+Version 0.10
+------------
+(codename Malzeug, released May 06, 2008)
+
+- Lexers added:
+
+  * Io
+  * Smalltalk
+  * Darcs patches
+  * Tcl
+  * Matlab
+  * Matlab sessions
+  * FORTRAN
+  * XSLT
+  * tcsh
+  * NumPy
+  * Python 3
+  * S, S-plus, R statistics languages
+  * Logtalk
+
+- In the LatexFormatter, the *commandprefix* option is now by default
+  'PY' instead of 'C', since the latter resulted in several collisions
+  with other packages.  Also, the special meaning of the *arg*
+  argument to ``get_style_defs()`` was removed.
+
+- Added ImageFormatter, to format code as PNG, JPG, GIF or BMP.
+  (Needs the Python Imaging Library.)
+
+- Support doc comments in the PHP lexer.
+
+- Handle format specifications in the Perl lexer.
+
+- Fix comment handling in the Batch lexer.
+
+- Add more file name extensions for the C++, INI and XML lexers.
+
+- Fixes in the IRC and MuPad lexers.
+
+- Fix function and interface name highlighting in the Java lexer.
+
+- Fix at-rule handling in the CSS lexer.
+
+- Handle KeyboardInterrupts gracefully in pygmentize.
+
+- Added BlackWhiteStyle.
+
+- Bash lexer now correctly highlights math, does not require
+  whitespace after semicolons, and correctly highlights boolean
+  operators.
+
+- Makefile lexer is now capable of handling BSD and GNU make syntax.
+
+
+Version 0.9
+-----------
+(codename Herbstzeitlose, released Oct 14, 2007)
+
+- Lexers added:
+
+  * Erlang
+  * ActionScript
+  * Literate Haskell
+  * Common Lisp
+  * Various assembly languages
+  * Gettext catalogs
+  * Squid configuration
+  * Debian control files
+  * MySQL-style SQL
+  * MOOCode
+
+- Lexers improved:
+
+  * Greatly improved the Haskell and OCaml lexers.
+  * Improved the Bash lexer's handling of nested constructs.
+  * The C# and Java lexers exhibited abysmal performance with some
+    input code; this should now be fixed.
+  * The IRC logs lexer is now able to colorize weechat logs too.
+  * The Lua lexer now recognizes multi-line comments.
+  * Fixed bugs in the D and MiniD lexer.
+
+- The encoding handling of the command line mode (pygmentize) was
+  enhanced. You shouldn't get UnicodeErrors from it anymore if you
+  don't give an encoding option.
+
+- Added a ``-P`` option to the command line mode which can be used to
+  give options whose values contain commas or equals signs.
+
+- Added 256-color terminal formatter.
+
+- Added an experimental SVG formatter.
+
+- Added the ``lineanchors`` option to the HTML formatter, thanks to
+  Ian Charnas for the idea.
+
+- Gave the line numbers table a CSS class in the HTML formatter.
+
+- Added a Vim 7-like style.
+
+
+Version 0.8.1
+-------------
+(released Jun 27, 2007)
+
+- Fixed POD highlighting in the Ruby lexer.
+
+- Fixed Unicode class and namespace name highlighting in the C# lexer.
+
+- Fixed Unicode string prefix highlighting in the Python lexer.
+
+- Fixed a bug in the D and MiniD lexers.
+
+- Fixed the included MoinMoin parser.
+
+
+Version 0.8
+-----------
+(codename Maikäfer, released May 30, 2007)
+
+- Lexers added:
+
+  * Haskell, thanks to Adam Blinkinsop
+  * Redcode, thanks to Adam Blinkinsop
+  * D, thanks to Kirk McDonald
+  * MuPad, thanks to Christopher Creutzig
+  * MiniD, thanks to Jarrett Billingsley
+  * Vim Script, by Tim Hatch
+
+- The HTML formatter now has a second line-numbers mode in which it
+  will just integrate the numbers in the same ``<pre>`` tag as the
+  code.
+
+- The `CSharpLexer` now is Unicode-aware, which means that it has an
+  option that can be set so that it correctly lexes Unicode
+  identifiers allowed by the C# specs.
+
+- Added a `RaiseOnErrorTokenFilter` that raises an exception when the
+  lexer generates an error token, and a `VisibleWhitespaceFilter` that
+  converts whitespace (spaces, tabs, newlines) into visible
+  characters.
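+
+  A minimal sketch of attaching one of the new filters::
+
+    from pygments.lexers import PythonLexer
+    from pygments.filters import VisibleWhitespaceFilter
+
+    lexer = PythonLexer()
+    # render spaces and tabs as visible characters in the output
+    lexer.add_filter(VisibleWhitespaceFilter(spaces=True, tabs=True))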
+
+- Fixed the `do_insertions()` helper function to yield correct
+  indices.
+
+- The ReST lexer now automatically highlights source code blocks in
+  ".. sourcecode:: language" and ".. code:: language" directive
+  blocks.
+
+- Improved the default style (thanks to Tiberius Teng). The old
+  default is still available as the "emacs" style (which was an alias
+  before).
+
+- The `get_style_defs` method of HTML formatters now uses the
+  `cssclass` option as the default selector if it was given.
+
+- Improved the ReST and Bash lexers a bit.
+
+- Fixed a few bugs in the Makefile and Bash lexers, thanks to Tim
+  Hatch.
+
+- Fixed a bug in the command line code that disallowed ``-O`` options
+  when using the ``-S`` option.
+
+- Fixed a bug in the `RawTokenFormatter`.
+
+
+Version 0.7.1
+-------------
+(released Feb 15, 2007)
+
+- Fixed little highlighting bugs in the Python, Java, Scheme and
+  Apache Config lexers.
+
+- Updated the included manpage.
+
+- Included a built version of the documentation in the source tarball.
+
+
+Version 0.7
+-----------
+(codename Faschingskrapfn, released Feb 14, 2007)
+
+- Added a MoinMoin parser that uses Pygments. With it, you get
+  Pygments highlighting in Moin Wiki pages.
+
+- Changed the exception raised if no suitable lexer, formatter etc. is
+  found in one of the `get_*_by_*` functions to a custom exception,
+  `pygments.util.ClassNotFound`. It is, however, a subclass of
+  `ValueError` in order to retain backwards compatibility.
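+
+  A minimal sketch of catching the new exception::
+
+    from pygments.lexers import get_lexer_by_name
+    from pygments.util import ClassNotFound
+
+    try:
+        get_lexer_by_name("no-such-language")
+    except ClassNotFound:
+        # still catchable as ValueError, for backwards compatibility
+        pass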
+
+- Added a `-H` command line option which can be used to get the
+  docstring of a lexer, formatter or filter.
+
+- Made the handling of lexers and formatters more consistent. The
+  aliases and filename patterns of formatters are now attributes on
+  them.
+
+- Added an OCaml lexer, thanks to Adam Blinkinsop.
+
+- Made the HTML formatter more flexible and easily subclassable, so
+  that custom wrappers, e.g. alternate line number markup, are easy to
+  implement. See the documentation.
+
+- Added an `outencoding` option to all formatters, making it possible
+  to override the `encoding` (which is used by lexers and formatters)
+  when using the command line interface. Also, if using the terminal
+  formatter and the output file is a terminal and has an encoding
+  attribute, use it if no encoding is given.
+
+- Made it possible to just drop style modules into the `styles`
+  subpackage of the Pygments installation.
+
+- Added a "state" keyword argument to the `using` helper.
+
+- Added a `commandprefix` option to the `LatexFormatter` which allows
+  controlling how the command names are constructed.
+
+- Added quite a few new lexers, thanks to Tim Hatch:
+
+  * Java Server Pages
+  * Windows batch files
+  * Trac Wiki markup
+  * Python tracebacks
+  * ReStructuredText
+  * Dylan
+  * and the Befunge esoteric programming language (yay!)
+
+- Added Mako lexers by Ben Bangert.
+
+- Added "fruity" style, another dark background originally vim-based
+  theme.
+
+- Added sources.list lexer by Dennis Kaarsemaker.
+
+- Added token stream filters, and a pygmentize option to use them.
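+
+  A minimal sketch using the built-in ``keywordcase`` filter (assuming a
+  current Pygments install)::
+
+    from pygments.lexers import PythonLexer
+
+    lexer = PythonLexer()
+    # uppercase all keywords in the token stream
+    lexer.add_filter('keywordcase', case='upper')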
+
+- Changed behavior of `in` operator for tokens.
+
+- Added mimetypes for all lexers.
+
+- Fixed some problems lexing Python strings.
+
+- Fixed tickets: #167, #178, #179, #180, #185, #201.
+
+
+Version 0.6
+-----------
+(codename Zimtstern, released Dec 20, 2006)
+
+- Added option for the HTML formatter to write the CSS to an external
+  file in "full document" mode.
+
+- Added RTF formatter.
+
+- Added Bash and Apache configuration lexers (thanks to Tim Hatch).
+
+- Improved guessing methods for various lexers.
+
+- Added `@media` support to CSS lexer (thanks to Tim Hatch).
+
+- Added a Groff lexer (thanks to Tim Hatch).
+
+- License change to BSD.
+
+- Added lexers for the Myghty template language.
+
+- Added a Scheme lexer (thanks to Marek Kubica).
+
+- Added some functions to iterate over existing lexers and
+  formatters.
+
+- The HtmlFormatter's `get_style_defs()` can now take a list as an
+  argument to generate CSS with multiple prefixes.
+
+- Support for guessing input encoding added.
+
+- Encoding support added: all processing is now done with Unicode
+  strings, input and output are converted from and optionally to byte
+  strings (see the ``encoding`` option of lexers and formatters).
+
+- Some improvements in the C(++) lexers handling comments and line
+  continuations.
+
+
+Version 0.5.1
+-------------
+(released Oct 30, 2006)
+
+- Fixed traceback in ``pygmentize -L`` (thanks to Piotr Ozarowski).
+
+
+Version 0.5
+-----------
+(codename PyKleur, released Oct 30, 2006)
+
+- Initial public release.
diff --git a/LICENSE b/LICENSE
old mode 100644
new mode 100755
index 13d1c74..4ba76a5
--- a/LICENSE
+++ b/LICENSE
@@ -1,25 +1,25 @@
-Copyright (c) 2006-2019 by the respective authors (see AUTHORS file).
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-* Redistributions of source code must retain the above copyright
-  notice, this list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above copyright
-  notice, this list of conditions and the following disclaimer in the
-  documentation and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+Copyright (c) 2006-2020 by the respective authors (see AUTHORS file).
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+* Redistributions of source code must retain the above copyright
+  notice, this list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright
+  notice, this list of conditions and the following disclaimer in the
+  documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/MANIFEST.in b/MANIFEST.in
old mode 100644
new mode 100755
index c6a8567..001eb6b
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,5 +1,5 @@
-include Makefile CHANGES LICENSE AUTHORS
-include external/*
-recursive-include tests *
-recursive-include doc *
-recursive-include scripts *
+include Makefile CHANGES LICENSE AUTHORS
+include external/*
+recursive-include tests *
+recursive-include doc *
+recursive-include scripts *
diff --git a/Makefile b/Makefile
old mode 100644
new mode 100755
index 13813ad..a6023d8
--- a/Makefile
+++ b/Makefile
@@ -1,73 +1,74 @@
-#
-# Makefile for Pygments
-# ~~~~~~~~~~~~~~~~~~~~~
-#
-# Combines scripts for common tasks.
-#
-# :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
-# :license: BSD, see LICENSE for details.
-#
-
-PYTHON ?= python
-
-export PYTHONPATH = $(shell echo "$$PYTHONPATH"):$(shell python -c 'import os; print ":".join(os.path.abspath(line.strip()) for line in file("PYTHONPATH"))' 2>/dev/null)
-
-.PHONY: all check clean clean-pyc codetags docs mapfiles \
-	pylint reindent test test-coverage
-
-all: clean-pyc check test
-
-check:
-	@$(PYTHON) scripts/detect_missing_analyse_text.py || true
-	@pyflakes pygments | grep -v 'but unused' || true
-	@$(PYTHON) scripts/check_sources.py -i build -i dist -i pygments/lexers/_mapping.py \
-		   -i docs/build -i pygments/formatters/_mapping.py -i pygments/unistring.py
-
-clean: clean-pyc
-	-rm -rf build tests/examplefiles/output
-	-rm -f codetags.html
-
-clean-pyc:
-	find . -name '*.pyc' -exec rm -f {} +
-	find . -name '*.pyo' -exec rm -f {} +
-	find . -name '*~' -exec rm -f {} +
-
-codetags:
-	@$(PYTHON) scripts/find_codetags.py -i tests/examplefiles -i scripts/pylintrc \
-		   -i scripts/find_codetags.py -o codetags.html .
-
-docs:
-	make -C doc html
-
-mapfiles:
-	(cd pygments/formatters; $(PYTHON) _mapping.py)
-	(cd pygments/lexers; $(PYTHON) _mapping.py)
-
-pylint:
-	@pylint --rcfile scripts/pylintrc pygments
-
-reindent:
-	@$(PYTHON) scripts/reindent.py -r -B .
-
-TEST = tests
-
-test:
-	@$(PYTHON) `which py.test` $(TEST)
-
-test-coverage:
-	@$(PYTHON) `which py.test` --cov --cov-report=html --cov-report=term $(TEST)
-
-test-examplefiles:
-	@$(PYTHON) `which py.test` tests.test_examplefiles
-
-tox-test:
-	@tox -- $(TEST)
-
-tox-test-coverage:
-	@tox -- --with-coverage --cover-package=pygments --cover-erase $(TEST)
-
-RLMODULES = pygments.lexers
-
-regexlint:
-	@if [ -z "$(REGEXLINT)" ]; then echo "Please set REGEXLINT=checkout path"; exit 1; fi
-	PYTHONPATH=`pwd`:$(REGEXLINT) $(REGEXLINT)/regexlint/cmdline.py $(RLMODULES)
+#
+# Makefile for Pygments
+# ~~~~~~~~~~~~~~~~~~~~~
+#
+# Combines scripts for common tasks.
+#
+# :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
+# :license: BSD, see LICENSE for details.
+#
+
+PYTHON ?= python3
+
+export PYTHONPATH = $(shell echo "$$PYTHONPATH"):$(shell $(PYTHON) -c 'import os; print(":".join(os.path.abspath(line.strip()) for line in open("PYTHONPATH")))' 2>/dev/null)
+
+.PHONY: all check clean clean-pyc codetags docs mapfiles \
+	pylint reindent test test-coverage test-examplefiles \
+	tox-test tox-test-coverage regexlint
+
+all: clean-pyc check test
+
+check:
+	@$(PYTHON) scripts/detect_missing_analyse_text.py || true
+	@pyflakes pygments | grep -v 'but unused' || true
+	@$(PYTHON) scripts/check_sources.py -i build -i dist -i pygments/lexers/_mapping.py \
+		   -i docs/build -i pygments/formatters/_mapping.py -i pygments/unistring.py
+
+clean: clean-pyc
+	-rm -rf build tests/examplefiles/output
+	-rm -f codetags.html
+
+clean-pyc:
+	find . -name '*.pyc' -exec rm -f {} +
+	find . -name '*.pyo' -exec rm -f {} +
+	find . -name '*~' -exec rm -f {} +
+
+codetags:
+	@$(PYTHON) scripts/find_codetags.py -i tests/examplefiles -i scripts/pylintrc \
+		   -i scripts/find_codetags.py -o codetags.html .
+
+docs:
+	make -C doc html
+
+mapfiles:
+	(cd pygments/formatters; $(PYTHON) _mapping.py)
+	(cd pygments/lexers; $(PYTHON) _mapping.py)
+
+pylint:
+	@pylint --rcfile scripts/pylintrc pygments
+
+reindent:
+	@$(PYTHON) scripts/reindent.py -r -B .
+
+TEST = tests
+
+test:
+	@$(PYTHON) `which py.test` $(TEST)
+
+test-coverage:
+	@$(PYTHON) `which py.test` --cov --cov-report=html --cov-report=term $(TEST)
+
+test-examplefiles:
+	@$(PYTHON) `which py.test` tests.test_examplefiles
+
+tox-test:
+	@tox -- $(TEST)
+
+tox-test-coverage:
+	@tox -- --with-coverage --cover-package=pygments --cover-erase $(TEST)
+
+RLMODULES = pygments.lexers
+
+regexlint:
+	@if [ -z "$(REGEXLINT)" ]; then echo "Please set REGEXLINT=checkout path"; exit 1; fi
+	PYTHONPATH=`pwd`:$(REGEXLINT) $(REGEXLINT)/regexlint/cmdline.py $(RLMODULES)
diff --git a/PKG-INFO b/PKG-INFO
old mode 100644
new mode 100755
index bbd99aa..43ccacb
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 1.2
 Name: Pygments
-Version: 2.6.1
+Version: 2.7.0
 Summary: Pygments is a syntax highlighting package written in Python.
 Home-page: https://pygments.org/
 Author: Georg Brandl
@@ -22,7 +22,7 @@ Description:
         * a number of output formats, presently HTML, LaTeX, RTF, SVG, all image formats that PIL supports and ANSI sequences
         * it is usable as a command-line tool and as a library
         
-        :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
+        :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
         :license: BSD, see LICENSE for details.
         
 Keywords: syntax highlighting
diff --git a/Pygments.egg-info/PKG-INFO b/Pygments.egg-info/PKG-INFO
old mode 100644
new mode 100755
index bbd99aa..43ccacb
--- a/Pygments.egg-info/PKG-INFO
+++ b/Pygments.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 1.2
 Name: Pygments
-Version: 2.6.1
+Version: 2.7.0
 Summary: Pygments is a syntax highlighting package written in Python.
 Home-page: https://pygments.org/
 Author: Georg Brandl
@@ -22,7 +22,7 @@ Description:
         * a number of output formats, presently HTML, LaTeX, RTF, SVG, all image formats that PIL supports and ANSI sequences
         * it is usable as a command-line tool and as a library
         
-        :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
+        :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
         :license: BSD, see LICENSE for details.
         
 Keywords: syntax highlighting
diff --git a/Pygments.egg-info/SOURCES.txt b/Pygments.egg-info/SOURCES.txt
old mode 100644
new mode 100755
index 49e3607..5473017
--- a/Pygments.egg-info/SOURCES.txt
+++ b/Pygments.egg-info/SOURCES.txt
@@ -20,111 +20,6 @@ doc/index.rst
 doc/languages.rst
 doc/make.bat
 doc/pygmentize.1
-doc/_build/doctrees/download.doctree
-doc/_build/doctrees/environment.pickle
-doc/_build/doctrees/faq.doctree
-doc/_build/doctrees/index.doctree
-doc/_build/doctrees/languages.doctree
-doc/_build/doctrees/docs/api.doctree
-doc/_build/doctrees/docs/authors.doctree
-doc/_build/doctrees/docs/changelog.doctree
-doc/_build/doctrees/docs/cmdline.doctree
-doc/_build/doctrees/docs/filterdevelopment.doctree
-doc/_build/doctrees/docs/filters.doctree
-doc/_build/doctrees/docs/formatterdevelopment.doctree
-doc/_build/doctrees/docs/formatters.doctree
-doc/_build/doctrees/docs/index.doctree
-doc/_build/doctrees/docs/integrate.doctree
-doc/_build/doctrees/docs/java.doctree
-doc/_build/doctrees/docs/lexerdevelopment.doctree
-doc/_build/doctrees/docs/lexers.doctree
-doc/_build/doctrees/docs/moinmoin.doctree
-doc/_build/doctrees/docs/plugins.doctree
-doc/_build/doctrees/docs/quickstart.doctree
-doc/_build/doctrees/docs/rstdirective.doctree
-doc/_build/doctrees/docs/styles.doctree
-doc/_build/doctrees/docs/tokens.doctree
-doc/_build/doctrees/docs/unicode.doctree
-doc/_build/html/.buildinfo
-doc/_build/html/download.html
-doc/_build/html/faq.html
-doc/_build/html/genindex.html
-doc/_build/html/index.html
-doc/_build/html/languages.html
-doc/_build/html/objects.inv
-doc/_build/html/py-modindex.html
-doc/_build/html/search.html
-doc/_build/html/searchindex.js
-doc/_build/html/_sources/download.rst.txt
-doc/_build/html/_sources/faq.rst.txt
-doc/_build/html/_sources/index.rst.txt
-doc/_build/html/_sources/languages.rst.txt
-doc/_build/html/_sources/docs/api.rst.txt
-doc/_build/html/_sources/docs/authors.rst.txt
-doc/_build/html/_sources/docs/changelog.rst.txt
-doc/_build/html/_sources/docs/cmdline.rst.txt
-doc/_build/html/_sources/docs/filterdevelopment.rst.txt
-doc/_build/html/_sources/docs/filters.rst.txt
-doc/_build/html/_sources/docs/formatterdevelopment.rst.txt
-doc/_build/html/_sources/docs/formatters.rst.txt
-doc/_build/html/_sources/docs/index.rst.txt
-doc/_build/html/_sources/docs/integrate.rst.txt
-doc/_build/html/_sources/docs/java.rst.txt
-doc/_build/html/_sources/docs/lexerdevelopment.rst.txt
-doc/_build/html/_sources/docs/lexers.rst.txt
-doc/_build/html/_sources/docs/moinmoin.rst.txt
-doc/_build/html/_sources/docs/plugins.rst.txt
-doc/_build/html/_sources/docs/quickstart.rst.txt
-doc/_build/html/_sources/docs/rstdirective.rst.txt
-doc/_build/html/_sources/docs/styles.rst.txt
-doc/_build/html/_sources/docs/tokens.rst.txt
-doc/_build/html/_sources/docs/unicode.rst.txt
-doc/_build/html/_static/basic.css
-doc/_build/html/_static/bodybg.png
-doc/_build/html/_static/demo.css
-doc/_build/html/_static/demo.js
-doc/_build/html/_static/docbg.png
-doc/_build/html/_static/doctools.js
-doc/_build/html/_static/documentation_options.js
-doc/_build/html/_static/favicon.ico
-doc/_build/html/_static/file.png
-doc/_build/html/_static/github.png
-doc/_build/html/_static/jquery-3.4.1.js
-doc/_build/html/_static/jquery.js
-doc/_build/html/_static/language_data.js
-doc/_build/html/_static/listitem.png
-doc/_build/html/_static/logo.png
-doc/_build/html/_static/logo_new.png
-doc/_build/html/_static/logo_only.png
-doc/_build/html/_static/minus.png
-doc/_build/html/_static/plus.png
-doc/_build/html/_static/pocoo.png
-doc/_build/html/_static/pygments.css
-doc/_build/html/_static/pygments14.css
-doc/_build/html/_static/searchtools.js
-doc/_build/html/_static/spinner.gif
-doc/_build/html/_static/underscore-1.3.1.js
-doc/_build/html/_static/underscore.js
-doc/_build/html/docs/api.html
-doc/_build/html/docs/authors.html
-doc/_build/html/docs/changelog.html
-doc/_build/html/docs/cmdline.html
-doc/_build/html/docs/filterdevelopment.html
-doc/_build/html/docs/filters.html
-doc/_build/html/docs/formatterdevelopment.html
-doc/_build/html/docs/formatters.html
-doc/_build/html/docs/index.html
-doc/_build/html/docs/integrate.html
-doc/_build/html/docs/java.html
-doc/_build/html/docs/lexerdevelopment.html
-doc/_build/html/docs/lexers.html
-doc/_build/html/docs/moinmoin.html
-doc/_build/html/docs/plugins.html
-doc/_build/html/docs/quickstart.html
-doc/_build/html/docs/rstdirective.html
-doc/_build/html/docs/styles.html
-doc/_build/html/docs/tokens.html
-doc/_build/html/docs/unicode.html
 doc/_static/demo.css
 doc/_static/demo.js
 doc/_static/favicon.ico
@@ -209,6 +104,7 @@ pygments/lexers/_lasso_builtins.py
 pygments/lexers/_lua_builtins.py
 pygments/lexers/_mapping.py
 pygments/lexers/_mql_builtins.py
+pygments/lexers/_mysql_builtins.py
 pygments/lexers/_openedge_builtins.py
 pygments/lexers/_php_builtins.py
 pygments/lexers/_postgres_builtins.py
@@ -227,8 +123,10 @@ pygments/lexers/ambient.py
 pygments/lexers/ampl.py
 pygments/lexers/apl.py
 pygments/lexers/archetype.py
+pygments/lexers/arrow.py
 pygments/lexers/asm.py
 pygments/lexers/automation.py
+pygments/lexers/bare.py
 pygments/lexers/basic.py
 pygments/lexers/bibtex.py
 pygments/lexers/boa.py
@@ -247,6 +145,7 @@ pygments/lexers/css.py
 pygments/lexers/d.py
 pygments/lexers/dalvik.py
 pygments/lexers/data.py
+pygments/lexers/devicetree.py
 pygments/lexers/diff.py
 pygments/lexers/dotnet.py
 pygments/lexers/dsls.py
@@ -267,6 +166,7 @@ pygments/lexers/fortran.py
 pygments/lexers/foxpro.py
 pygments/lexers/freefem.py
 pygments/lexers/functional.py
+pygments/lexers/gdscript.py
 pygments/lexers/go.py
 pygments/lexers/grammar_notation.py
 pygments/lexers/graph.py
@@ -311,9 +211,11 @@ pygments/lexers/pascal.py
 pygments/lexers/pawn.py
 pygments/lexers/perl.py
 pygments/lexers/php.py
+pygments/lexers/pointless.py
 pygments/lexers/pony.py
 pygments/lexers/praat.py
 pygments/lexers/prolog.py
+pygments/lexers/promql.py
 pygments/lexers/python.py
 pygments/lexers/qvt.py
 pygments/lexers/r.py
@@ -349,6 +251,7 @@ pygments/lexers/text.py
 pygments/lexers/textedit.py
 pygments/lexers/textfmts.py
 pygments/lexers/theorem.py
+pygments/lexers/tnt.py
 pygments/lexers/trafficscript.py
 pygments/lexers/typoscript.py
 pygments/lexers/unicon.py
@@ -362,6 +265,7 @@ pygments/lexers/webmisc.py
 pygments/lexers/whiley.py
 pygments/lexers/x10.py
 pygments/lexers/xorg.py
+pygments/lexers/yang.py
 pygments/lexers/zig.py
 pygments/styles/__init__.py
 pygments/styles/abap.py
@@ -416,15 +320,19 @@ tests/test_bibtex.py
 tests/test_cfm.py
 tests/test_clexer.py
 tests/test_cmdline.py
+tests/test_coffeescript.py
 tests/test_cpp.py
 tests/test_crystal.py
 tests/test_csound.py
 tests/test_data.py
 tests/test_examplefiles.py
 tests/test_ezhil.py
+tests/test_gdscript.py
 tests/test_grammar_notation.py
 tests/test_haskell.py
+tests/test_hdl.py
 tests/test_html_formatter.py
+tests/test_html_formatter_linenos_elements.py
 tests/test_idris.py
 tests/test_inherit.py
 tests/test_irc_formatter.py
@@ -434,12 +342,16 @@ tests/test_julia.py
 tests/test_kotlin.py
 tests/test_latex_formatter.py
 tests/test_lexers_other.py
+tests/test_make.py
 tests/test_markdown_lexer.py
+tests/test_matlab.py
 tests/test_modeline.py
+tests/test_mysql.py
 tests/test_objectiveclexer.py
 tests/test_perllexer.py
 tests/test_php.py
 tests/test_praat.py
+tests/test_promql.py
 tests/test_properties.py
 tests/test_python.py
 tests/test_qbasiclexer.py
@@ -459,6 +371,7 @@ tests/test_usd.py
 tests/test_using_api.py
 tests/test_util.py
 tests/test_whiley.py
+tests/test_yang.py
 tests/dtds/HTML4-f.dtd
 tests/dtds/HTML4-s.dtd
 tests/dtds/HTML4.dcl
@@ -490,10 +403,12 @@ tests/examplefiles/Makefile
 tests/examplefiles/Object.st
 tests/examplefiles/OrderedMap.hx
 tests/examplefiles/RoleQ.pm6
+tests/examplefiles/Singularity
 tests/examplefiles/SmallCheck.hs
 tests/examplefiles/Sorting.mod
 tests/examplefiles/StdGeneric.icl
 tests/examplefiles/Sudoku.lhs
+tests/examplefiles/TiddlyWiki5.tid
 tests/examplefiles/abnf_example1.abnf
 tests/examplefiles/abnf_example2.abnf
 tests/examplefiles/addressbook.proto
@@ -510,6 +425,7 @@ tests/examplefiles/auction.ride
 tests/examplefiles/autoit_submit.au3
 tests/examplefiles/automake.mk
 tests/examplefiles/badcase.java
+tests/examplefiles/bare.bare
 tests/examplefiles/bigtest.nsi
 tests/examplefiles/bnf_example1.bnf
 tests/examplefiles/boot-9.scm
@@ -562,14 +478,17 @@ tests/examplefiles/example.cob
 tests/examplefiles/example.coffee
 tests/examplefiles/example.cpp
 tests/examplefiles/example.dmesg
+tests/examplefiles/example.dts
 tests/examplefiles/example.e
 tests/examplefiles/example.elm
 tests/examplefiles/example.eml
+tests/examplefiles/example.exec
 tests/examplefiles/example.ezt
 tests/examplefiles/example.f90
 tests/examplefiles/example.feature
 tests/examplefiles/example.fish
 tests/examplefiles/example.flo
+tests/examplefiles/example.fst
 tests/examplefiles/example.gd
 tests/examplefiles/example.gi
 tests/examplefiles/example.golo
@@ -616,6 +535,8 @@ tests/examplefiles/example.pony
 tests/examplefiles/example.pp
 tests/examplefiles/example.praat
 tests/examplefiles/example.prg
+tests/examplefiles/example.promql
+tests/examplefiles/example.ptls
 tests/examplefiles/example.rb
 tests/examplefiles/example.red
 tests/examplefiles/example.reds
@@ -644,6 +565,7 @@ tests/examplefiles/example.tasm
 tests/examplefiles/example.tea
 tests/examplefiles/example.tf
 tests/examplefiles/example.thy
+tests/examplefiles/example.tnt
 tests/examplefiles/example.todotxt
 tests/examplefiles/example.toml
 tests/examplefiles/example.ttl
@@ -682,6 +604,7 @@ tests/examplefiles/fucked_up.rb
 tests/examplefiles/function.mu
 tests/examplefiles/functional.rst
 tests/examplefiles/garcia-wachs.kk
+tests/examplefiles/gdscript_example.gd
 tests/examplefiles/genclass.clj
 tests/examplefiles/genshi_example.xml+genshi
 tests/examplefiles/genshitext_example.genshitext
@@ -742,6 +665,7 @@ tests/examplefiles/minimal.ns2
 tests/examplefiles/modula2_test_cases.def
 tests/examplefiles/moin_SyntaxReference.txt
 tests/examplefiles/multiline_regexes.rb
+tests/examplefiles/mysql.txt
 tests/examplefiles/nanomsg.intr
 tests/examplefiles/nasm_aoutso.asm
 tests/examplefiles/nasm_objexe.asm
@@ -766,7 +690,9 @@ tests/examplefiles/plain.bst
 tests/examplefiles/pleac.in.rb
 tests/examplefiles/postgresql_test.txt
 tests/examplefiles/pppoe.applescript
+tests/examplefiles/primesieve.arw
 tests/examplefiles/psql_session.txt
+tests/examplefiles/psysh_test.psysh
 tests/examplefiles/py3_test.txt
 tests/examplefiles/py3tb_test.py3tb
 tests/examplefiles/pycon_ctrlc_traceback
@@ -896,6 +822,7 @@ tests/examplefiles/test.vb
 tests/examplefiles/test.vhdl
 tests/examplefiles/test.xqy
 tests/examplefiles/test.xsl
+tests/examplefiles/test.yang
 tests/examplefiles/test.zeek
 tests/examplefiles/test.zep
 tests/examplefiles/test2.odin
@@ -927,6 +854,70 @@ tests/examplefiles/xml_example
 tests/examplefiles/xorg.conf
 tests/examplefiles/yahalom.cpsa
 tests/examplefiles/zmlrpc.f90
+tests/html_linenos_expected_output/inline_cls_step_1_start_1_special_0_anchor.html
+tests/html_linenos_expected_output/inline_cls_step_1_start_1_special_0_noanchor.html
+tests/html_linenos_expected_output/inline_cls_step_1_start_1_special_3_anchor.html
+tests/html_linenos_expected_output/inline_cls_step_1_start_1_special_3_noanchor.html
+tests/html_linenos_expected_output/inline_cls_step_1_start_8_special_0_anchor.html
+tests/html_linenos_expected_output/inline_cls_step_1_start_8_special_0_noanchor.html
+tests/html_linenos_expected_output/inline_cls_step_1_start_8_special_3_anchor.html
+tests/html_linenos_expected_output/inline_cls_step_1_start_8_special_3_noanchor.html
+tests/html_linenos_expected_output/inline_cls_step_2_start_1_special_0_anchor.html
+tests/html_linenos_expected_output/inline_cls_step_2_start_1_special_0_noanchor.html
+tests/html_linenos_expected_output/inline_cls_step_2_start_1_special_3_anchor.html
+tests/html_linenos_expected_output/inline_cls_step_2_start_1_special_3_noanchor.html
+tests/html_linenos_expected_output/inline_cls_step_2_start_8_special_0_anchor.html
+tests/html_linenos_expected_output/inline_cls_step_2_start_8_special_0_noanchor.html
+tests/html_linenos_expected_output/inline_cls_step_2_start_8_special_3_anchor.html
+tests/html_linenos_expected_output/inline_cls_step_2_start_8_special_3_noanchor.html
+tests/html_linenos_expected_output/inline_nocls_step_1_start_1_special_0_anchor.html
+tests/html_linenos_expected_output/inline_nocls_step_1_start_1_special_0_noanchor.html
+tests/html_linenos_expected_output/inline_nocls_step_1_start_1_special_3_anchor.html
+tests/html_linenos_expected_output/inline_nocls_step_1_start_1_special_3_noanchor.html
+tests/html_linenos_expected_output/inline_nocls_step_1_start_8_special_0_anchor.html
+tests/html_linenos_expected_output/inline_nocls_step_1_start_8_special_0_noanchor.html
+tests/html_linenos_expected_output/inline_nocls_step_1_start_8_special_3_anchor.html
+tests/html_linenos_expected_output/inline_nocls_step_1_start_8_special_3_noanchor.html
+tests/html_linenos_expected_output/inline_nocls_step_2_start_1_special_0_anchor.html
+tests/html_linenos_expected_output/inline_nocls_step_2_start_1_special_0_noanchor.html
+tests/html_linenos_expected_output/inline_nocls_step_2_start_1_special_3_anchor.html
+tests/html_linenos_expected_output/inline_nocls_step_2_start_1_special_3_noanchor.html
+tests/html_linenos_expected_output/inline_nocls_step_2_start_8_special_0_anchor.html
+tests/html_linenos_expected_output/inline_nocls_step_2_start_8_special_0_noanchor.html
+tests/html_linenos_expected_output/inline_nocls_step_2_start_8_special_3_anchor.html
+tests/html_linenos_expected_output/inline_nocls_step_2_start_8_special_3_noanchor.html
+tests/html_linenos_expected_output/table_cls_step_1_start_1_special_0_anchor.html
+tests/html_linenos_expected_output/table_cls_step_1_start_1_special_0_noanchor.html
+tests/html_linenos_expected_output/table_cls_step_1_start_1_special_3_anchor.html
+tests/html_linenos_expected_output/table_cls_step_1_start_1_special_3_noanchor.html
+tests/html_linenos_expected_output/table_cls_step_1_start_8_special_0_anchor.html
+tests/html_linenos_expected_output/table_cls_step_1_start_8_special_0_noanchor.html
+tests/html_linenos_expected_output/table_cls_step_1_start_8_special_3_anchor.html
+tests/html_linenos_expected_output/table_cls_step_1_start_8_special_3_noanchor.html
+tests/html_linenos_expected_output/table_cls_step_2_start_1_special_0_anchor.html
+tests/html_linenos_expected_output/table_cls_step_2_start_1_special_0_noanchor.html
+tests/html_linenos_expected_output/table_cls_step_2_start_1_special_3_anchor.html
+tests/html_linenos_expected_output/table_cls_step_2_start_1_special_3_noanchor.html
+tests/html_linenos_expected_output/table_cls_step_2_start_8_special_0_anchor.html
+tests/html_linenos_expected_output/table_cls_step_2_start_8_special_0_noanchor.html
+tests/html_linenos_expected_output/table_cls_step_2_start_8_special_3_anchor.html
+tests/html_linenos_expected_output/table_cls_step_2_start_8_special_3_noanchor.html
+tests/html_linenos_expected_output/table_nocls_step_1_start_1_special_0_anchor.html
+tests/html_linenos_expected_output/table_nocls_step_1_start_1_special_0_noanchor.html
+tests/html_linenos_expected_output/table_nocls_step_1_start_1_special_3_anchor.html
+tests/html_linenos_expected_output/table_nocls_step_1_start_1_special_3_noanchor.html
+tests/html_linenos_expected_output/table_nocls_step_1_start_8_special_0_anchor.html
+tests/html_linenos_expected_output/table_nocls_step_1_start_8_special_0_noanchor.html
+tests/html_linenos_expected_output/table_nocls_step_1_start_8_special_3_anchor.html
+tests/html_linenos_expected_output/table_nocls_step_1_start_8_special_3_noanchor.html
+tests/html_linenos_expected_output/table_nocls_step_2_start_1_special_0_anchor.html
+tests/html_linenos_expected_output/table_nocls_step_2_start_1_special_0_noanchor.html
+tests/html_linenos_expected_output/table_nocls_step_2_start_1_special_3_anchor.html
+tests/html_linenos_expected_output/table_nocls_step_2_start_1_special_3_noanchor.html
+tests/html_linenos_expected_output/table_nocls_step_2_start_8_special_0_anchor.html
+tests/html_linenos_expected_output/table_nocls_step_2_start_8_special_0_noanchor.html
+tests/html_linenos_expected_output/table_nocls_step_2_start_8_special_3_anchor.html
+tests/html_linenos_expected_output/table_nocls_step_2_start_8_special_3_noanchor.html
 tests/support/empty.py
 tests/support/html_formatter.py
 tests/support/python_lexer.py
diff --git a/Pygments.egg-info/dependency_links.txt b/Pygments.egg-info/dependency_links.txt
old mode 100644
new mode 100755
diff --git a/Pygments.egg-info/entry_points.txt b/Pygments.egg-info/entry_points.txt
old mode 100644
new mode 100755
diff --git a/Pygments.egg-info/not-zip-safe b/Pygments.egg-info/not-zip-safe
old mode 100644
new mode 100755
index 8b13789..d3f5a12
--- a/Pygments.egg-info/not-zip-safe
+++ b/Pygments.egg-info/not-zip-safe
@@ -1 +1 @@
-
+
diff --git a/Pygments.egg-info/top_level.txt b/Pygments.egg-info/top_level.txt
old mode 100644
new mode 100755
diff --git a/README.rst b/README.rst
old mode 100644
new mode 100755
index 6b7be27..d7b47b2
--- a/README.rst
+++ b/README.rst
@@ -1,44 +1,44 @@
-Welcome to Pygments
-===================
-
-This is the source of Pygments.  It is a **generic syntax highlighter** written
-in Python that supports over 500 languages and text formats, for use in code
-hosting, forums, wikis or other applications that need to prettify source code.
-
-Installing
-----------
-
-... works as usual, use ``pip install Pygments`` to get published versions,
-or ``python setup.py install`` to install from a checkout.
-
-Documentation
--------------
-
-... can be found online at https://pygments.org/ or created with Sphinx by ::
-
-   cd doc
-   make html
-
-Development
------------
-
-... takes place on `GitHub <https://github.com/pygments/pygments>`_, where the
-Git repository, tickets and pull requests can be viewed.
-
-Continuous testing runs on GitHub workflows:
-
-.. image:: https://github.com/pygments/pygments/workflows/Pygments/badge.svg
-   :target: https://github.com/pygments/pygments/actions?query=workflow%3APygments
-
-The authors
------------
-
-Pygments is maintained by **Georg Brandl**, e-mail address *georg*\ *@*\ *python.org*
-and **Matthäus Chajdas**.
-
-Many lexers and fixes have been contributed by **Armin Ronacher**, the rest of
-the `Pocoo <https://www.pocoo.org/>`_ team and **Tim Hatch**.
-
-The code is distributed under the BSD 2-clause license.  Contributors making pull
-requests must agree that they are able and willing to put their contributions
-under that license.
+Welcome to Pygments
+===================
+
+This is the source of Pygments.  It is a **generic syntax highlighter** written
+in Python that supports over 500 languages and text formats, for use in code
+hosting, forums, wikis or other applications that need to prettify source code.
+
+Installing
+----------
+
+... works as usual, use ``pip install Pygments`` to get published versions,
+or ``python setup.py install`` to install from a checkout.
+
+Documentation
+-------------
+
+... can be found online at https://pygments.org/ or created with Sphinx by ::
+
+   cd doc
+   make html
+
+Development
+-----------
+
+... takes place on `GitHub <https://github.com/pygments/pygments>`_, where the
+Git repository, tickets and pull requests can be viewed.
+
+Continuous testing runs on GitHub workflows:
+
+.. image:: https://github.com/pygments/pygments/workflows/Pygments/badge.svg
+   :target: https://github.com/pygments/pygments/actions?query=workflow%3APygments
+
+The authors
+-----------
+
+Pygments is maintained by **Georg Brandl**, e-mail address *georg*\ *@*\ *python.org*
+and **Matthäus Chajdas**.
+
+Many lexers and fixes have been contributed by **Armin Ronacher**, the rest of
+the `Pocoo <https://www.pocoo.org/>`_ team and **Tim Hatch**.
+
+The code is distributed under the BSD 2-clause license.  Contributors making pull
+requests must agree that they are able and willing to put their contributions
+under that license.
diff --git a/doc/Makefile b/doc/Makefile
old mode 100644
new mode 100755
index 7fb7541..aab1d73
--- a/doc/Makefile
+++ b/doc/Makefile
@@ -1,153 +1,153 @@
-# Makefile for Sphinx documentation
-#
-
-# You can set these variables from the command line.
-SPHINXOPTS    =
-SPHINXBUILD   = PYTHONPATH=.. sphinx-build
-PAPER         =
-BUILDDIR      = _build
-
-# Internal variables.
-PAPEROPT_a4     = -D latex_paper_size=a4
-PAPEROPT_letter = -D latex_paper_size=letter
-ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
-# the i18n builder cannot share the environment and doctrees with the others
-I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
-
-.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
-
-help:
-	@echo "Please use \`make <target>' where <target> is one of"
-	@echo "  html       to make standalone HTML files"
-	@echo "  dirhtml    to make HTML files named index.html in directories"
-	@echo "  singlehtml to make a single large HTML file"
-	@echo "  pickle     to make pickle files"
-	@echo "  json       to make JSON files"
-	@echo "  htmlhelp   to make HTML files and a HTML help project"
-	@echo "  qthelp     to make HTML files and a qthelp project"
-	@echo "  devhelp    to make HTML files and a Devhelp project"
-	@echo "  epub       to make an epub"
-	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
-	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
-	@echo "  text       to make text files"
-	@echo "  man        to make manual pages"
-	@echo "  texinfo    to make Texinfo files"
-	@echo "  info       to make Texinfo files and run them through makeinfo"
-	@echo "  gettext    to make PO message catalogs"
-	@echo "  changes    to make an overview of all changed/added/deprecated items"
-	@echo "  linkcheck  to check all external links for integrity"
-	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
-
-clean:
-	-rm -rf $(BUILDDIR)/*
-
-html:
-	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
-	@echo
-	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
-
-dirhtml:
-	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
-	@echo
-	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
-
-singlehtml:
-	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
-	@echo
-	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
-
-pickle:
-	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
-	@echo
-	@echo "Build finished; now you can process the pickle files."
-
-json:
-	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
-	@echo
-	@echo "Build finished; now you can process the JSON files."
-
-htmlhelp:
-	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
-	@echo
-	@echo "Build finished; now you can run HTML Help Workshop with the" \
-	      ".hhp project file in $(BUILDDIR)/htmlhelp."
-
-qthelp:
-	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
-	@echo
-	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
-	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
-	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Pygments.qhcp"
-	@echo "To view the help file:"
-	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Pygments.qhc"
-
-devhelp:
-	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
-	@echo
-	@echo "Build finished."
-	@echo "To view the help file:"
-	@echo "# mkdir -p $$HOME/.local/share/devhelp/Pygments"
-	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Pygments"
-	@echo "# devhelp"
-
-epub:
-	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
-	@echo
-	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
-
-latex:
-	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
-	@echo
-	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
-	@echo "Run \`make' in that directory to run these through (pdf)latex" \
-	      "(use \`make latexpdf' here to do that automatically)."
-
-latexpdf:
-	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
-	@echo "Running LaTeX files through pdflatex..."
-	$(MAKE) -C $(BUILDDIR)/latex all-pdf
-	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
-
-text:
-	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
-	@echo
-	@echo "Build finished. The text files are in $(BUILDDIR)/text."
-
-man:
-	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
-	@echo
-	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
-
-texinfo:
-	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
-	@echo
-	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
-	@echo "Run \`make' in that directory to run these through makeinfo" \
-	      "(use \`make info' here to do that automatically)."
-
-info:
-	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
-	@echo "Running Texinfo files through makeinfo..."
-	make -C $(BUILDDIR)/texinfo info
-	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
-
-gettext:
-	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
-	@echo
-	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
-
-changes:
-	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
-	@echo
-	@echo "The overview file is in $(BUILDDIR)/changes."
-
-linkcheck:
-	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
-	@echo
-	@echo "Link check complete; look for any errors in the above output " \
-	      "or in $(BUILDDIR)/linkcheck/output.txt."
-
-doctest:
-	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
-	@echo "Testing of doctests in the sources finished, look at the " \
-	      "results in $(BUILDDIR)/doctest/output.txt."
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS    =
+SPHINXBUILD   = PYTHONPATH=.. sphinx-build
+PAPER         =
+BUILDDIR      = _build
+
+# Internal variables.
+PAPEROPT_a4     = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@echo "  html       to make standalone HTML files"
+	@echo "  dirhtml    to make HTML files named index.html in directories"
+	@echo "  singlehtml to make a single large HTML file"
+	@echo "  pickle     to make pickle files"
+	@echo "  json       to make JSON files"
+	@echo "  htmlhelp   to make HTML files and a HTML help project"
+	@echo "  qthelp     to make HTML files and a qthelp project"
+	@echo "  devhelp    to make HTML files and a Devhelp project"
+	@echo "  epub       to make an epub"
+	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
+	@echo "  text       to make text files"
+	@echo "  man        to make manual pages"
+	@echo "  texinfo    to make Texinfo files"
+	@echo "  info       to make Texinfo files and run them through makeinfo"
+	@echo "  gettext    to make PO message catalogs"
+	@echo "  changes    to make an overview of all changed/added/deprecated items"
+	@echo "  linkcheck  to check all external links for integrity"
+	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+	-rm -rf $(BUILDDIR)/*
+
+html:
+	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+	@echo
+	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+	@echo
+	@echo "Build finished; now you can process the pickle files."
+
+json:
+	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+	@echo
+	@echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+	@echo
+	@echo "Build finished; now you can run HTML Help Workshop with the" \
+	      ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+	@echo
+	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
+	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Pygments.qhcp"
+	@echo "To view the help file:"
+	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Pygments.qhc"
+
+devhelp:
+	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+	@echo
+	@echo "Build finished."
+	@echo "To view the help file:"
+	@echo "# mkdir -p $$HOME/.local/share/devhelp/Pygments"
+	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Pygments"
+	@echo "# devhelp"
+
+epub:
+	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+	@echo
+	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo
+	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+	@echo "Run \`make' in that directory to run these through (pdf)latex" \
+	      "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo "Running LaTeX files through pdflatex..."
+	$(MAKE) -C $(BUILDDIR)/latex all-pdf
+	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+	@echo
+	@echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+	@echo
+	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+texinfo:
+	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+	@echo
+	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+	@echo "Run \`make' in that directory to run these through makeinfo" \
+	      "(use \`make info' here to do that automatically)."
+
+info:
+	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+	@echo "Running Texinfo files through makeinfo..."
+	make -C $(BUILDDIR)/texinfo info
+	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+gettext:
+	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+	@echo
+	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+changes:
+	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+	@echo
+	@echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+	@echo
+	@echo "Link check complete; look for any errors in the above output " \
+	      "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+	@echo "Testing of doctests in the sources finished, look at the " \
+	      "results in $(BUILDDIR)/doctest/output.txt."
diff --git a/doc/_build/doctrees/docs/api.doctree b/doc/_build/doctrees/docs/api.doctree
deleted file mode 100644
index 730fb8b..0000000
Binary files a/doc/_build/doctrees/docs/api.doctree and /dev/null differ
diff --git a/doc/_build/doctrees/docs/authors.doctree b/doc/_build/doctrees/docs/authors.doctree
deleted file mode 100644
index 0d25fa8..0000000
Binary files a/doc/_build/doctrees/docs/authors.doctree and /dev/null differ
diff --git a/doc/_build/doctrees/docs/changelog.doctree b/doc/_build/doctrees/docs/changelog.doctree
deleted file mode 100644
index 6edfce7..0000000
Binary files a/doc/_build/doctrees/docs/changelog.doctree and /dev/null differ
diff --git a/doc/_build/doctrees/docs/cmdline.doctree b/doc/_build/doctrees/docs/cmdline.doctree
deleted file mode 100644
index 74544a0..0000000
Binary files a/doc/_build/doctrees/docs/cmdline.doctree and /dev/null differ
diff --git a/doc/_build/doctrees/docs/filterdevelopment.doctree b/doc/_build/doctrees/docs/filterdevelopment.doctree
deleted file mode 100644
index 36ac3ef..0000000
Binary files a/doc/_build/doctrees/docs/filterdevelopment.doctree and /dev/null differ
diff --git a/doc/_build/doctrees/docs/filters.doctree b/doc/_build/doctrees/docs/filters.doctree
deleted file mode 100644
index e407baa..0000000
Binary files a/doc/_build/doctrees/docs/filters.doctree and /dev/null differ
diff --git a/doc/_build/doctrees/docs/formatterdevelopment.doctree b/doc/_build/doctrees/docs/formatterdevelopment.doctree
deleted file mode 100644
index 9feccf2..0000000
Binary files a/doc/_build/doctrees/docs/formatterdevelopment.doctree and /dev/null differ
diff --git a/doc/_build/doctrees/docs/formatters.doctree b/doc/_build/doctrees/docs/formatters.doctree
deleted file mode 100644
index b128f6a..0000000
Binary files a/doc/_build/doctrees/docs/formatters.doctree and /dev/null differ
diff --git a/doc/_build/doctrees/docs/index.doctree b/doc/_build/doctrees/docs/index.doctree
deleted file mode 100644
index 5bf88f3..0000000
Binary files a/doc/_build/doctrees/docs/index.doctree and /dev/null differ
diff --git a/doc/_build/doctrees/docs/integrate.doctree b/doc/_build/doctrees/docs/integrate.doctree
deleted file mode 100644
index b41e758..0000000
Binary files a/doc/_build/doctrees/docs/integrate.doctree and /dev/null differ
diff --git a/doc/_build/doctrees/docs/java.doctree b/doc/_build/doctrees/docs/java.doctree
deleted file mode 100644
index 4f61c1c..0000000
Binary files a/doc/_build/doctrees/docs/java.doctree and /dev/null differ
diff --git a/doc/_build/doctrees/docs/lexerdevelopment.doctree b/doc/_build/doctrees/docs/lexerdevelopment.doctree
deleted file mode 100644
index f7d372e..0000000
Binary files a/doc/_build/doctrees/docs/lexerdevelopment.doctree and /dev/null differ
diff --git a/doc/_build/doctrees/docs/lexers.doctree b/doc/_build/doctrees/docs/lexers.doctree
deleted file mode 100644
index ea4ab6d..0000000
Binary files a/doc/_build/doctrees/docs/lexers.doctree and /dev/null differ
diff --git a/doc/_build/doctrees/docs/moinmoin.doctree b/doc/_build/doctrees/docs/moinmoin.doctree
deleted file mode 100644
index ef2922a..0000000
Binary files a/doc/_build/doctrees/docs/moinmoin.doctree and /dev/null differ
diff --git a/doc/_build/doctrees/docs/plugins.doctree b/doc/_build/doctrees/docs/plugins.doctree
deleted file mode 100644
index c8af0ba..0000000
Binary files a/doc/_build/doctrees/docs/plugins.doctree and /dev/null differ
diff --git a/doc/_build/doctrees/docs/quickstart.doctree b/doc/_build/doctrees/docs/quickstart.doctree
deleted file mode 100644
index 82ac98c..0000000
Binary files a/doc/_build/doctrees/docs/quickstart.doctree and /dev/null differ
diff --git a/doc/_build/doctrees/docs/rstdirective.doctree b/doc/_build/doctrees/docs/rstdirective.doctree
deleted file mode 100644
index 1e80256..0000000
Binary files a/doc/_build/doctrees/docs/rstdirective.doctree and /dev/null differ
diff --git a/doc/_build/doctrees/docs/styles.doctree b/doc/_build/doctrees/docs/styles.doctree
deleted file mode 100644
index 3cf3380..0000000
Binary files a/doc/_build/doctrees/docs/styles.doctree and /dev/null differ
diff --git a/doc/_build/doctrees/docs/tokens.doctree b/doc/_build/doctrees/docs/tokens.doctree
deleted file mode 100644
index 6f4a1a7..0000000
Binary files a/doc/_build/doctrees/docs/tokens.doctree and /dev/null differ
diff --git a/doc/_build/doctrees/docs/unicode.doctree b/doc/_build/doctrees/docs/unicode.doctree
deleted file mode 100644
index ec129cb..0000000
Binary files a/doc/_build/doctrees/docs/unicode.doctree and /dev/null differ
diff --git a/doc/_build/doctrees/download.doctree b/doc/_build/doctrees/download.doctree
deleted file mode 100644
index 6788fc3..0000000
Binary files a/doc/_build/doctrees/download.doctree and /dev/null differ
diff --git a/doc/_build/doctrees/environment.pickle b/doc/_build/doctrees/environment.pickle
deleted file mode 100644
index f0a0d35..0000000
Binary files a/doc/_build/doctrees/environment.pickle and /dev/null differ
diff --git a/doc/_build/doctrees/faq.doctree b/doc/_build/doctrees/faq.doctree
deleted file mode 100644
index 6f5324c..0000000
Binary files a/doc/_build/doctrees/faq.doctree and /dev/null differ
diff --git a/doc/_build/doctrees/index.doctree b/doc/_build/doctrees/index.doctree
deleted file mode 100644
index e35a485..0000000
Binary files a/doc/_build/doctrees/index.doctree and /dev/null differ
diff --git a/doc/_build/doctrees/languages.doctree b/doc/_build/doctrees/languages.doctree
deleted file mode 100644
index 1ef0561..0000000
Binary files a/doc/_build/doctrees/languages.doctree and /dev/null differ
diff --git a/doc/_build/html/.buildinfo b/doc/_build/html/.buildinfo
deleted file mode 100644
index 23b7cb1..0000000
--- a/doc/_build/html/.buildinfo
+++ /dev/null
@@ -1,4 +0,0 @@
-# Sphinx build info version 1
-# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: c79bd2c7f6735b09a44f7dfcaa237099
-tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/doc/_build/html/_sources/docs/api.rst.txt b/doc/_build/html/_sources/docs/api.rst.txt
deleted file mode 100644
index a6b242d..0000000
--- a/doc/_build/html/_sources/docs/api.rst.txt
+++ /dev/null
@@ -1,354 +0,0 @@
-.. -*- mode: rst -*-
-
-=====================
-The full Pygments API
-=====================
-
-This page describes the Pygments API.
-
-High-level API
-==============
-
-.. module:: pygments
-
-Functions from the :mod:`pygments` module:
-
-.. function:: lex(code, lexer)
-
-    Lex `code` with the `lexer` (must be a `Lexer` instance)
-    and return an iterable of tokens. Currently, this only calls
-    `lexer.get_tokens()`.
-
-.. function:: format(tokens, formatter, outfile=None)
-
-    Format a token stream (iterable of tokens) `tokens` with the
-    `formatter` (must be a `Formatter` instance). The result is
-    written to `outfile`, or if that is ``None``, returned as a
-    string.
-
-.. function:: highlight(code, lexer, formatter, outfile=None)
-
-    This is the most high-level highlighting function.
-    It combines `lex` and `format` in one function.
-
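For a quick illustration of how these three functions fit together (a
minimal sketch, assuming Pygments is importable):

.. sourcecode:: pycon

    >>> from pygments import highlight
    >>> from pygments.lexers import PythonLexer
    >>> from pygments.formatters import HtmlFormatter
    >>> html = highlight('print("Hello")', PythonLexer(), HtmlFormatter())
    >>> html.startswith('<div class="highlight">')
    True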
-
-.. module:: pygments.lexers
-
-Functions from :mod:`pygments.lexers`:
-
-.. function:: get_lexer_by_name(alias, **options)
-
-    Return an instance of a `Lexer` subclass that has `alias` in its
-    aliases list. The lexer is given the `options` at its
-    instantiation.
-
-    Will raise :exc:`pygments.util.ClassNotFound` if no lexer with that alias is
-    found.
-
-.. function:: get_lexer_for_filename(fn, **options)
-
-    Return a `Lexer` subclass instance that has a filename pattern
-    matching `fn`. The lexer is given the `options` at its
-    instantiation.
-
-    Will raise :exc:`pygments.util.ClassNotFound` if no lexer for that filename
-    is found.
-
-.. function:: get_lexer_for_mimetype(mime, **options)
-
-    Return a `Lexer` subclass instance that has `mime` in its mimetype
-    list. The lexer is given the `options` at its instantiation.
-
-    Will raise :exc:`pygments.util.ClassNotFound` if no lexer for that mimetype
-    is found.
-
-.. function:: load_lexer_from_file(filename, lexername="CustomLexer", **options)
-
-    Return a `Lexer` subclass instance loaded from the provided file, relative
-    to the current directory. The file is expected to contain a Lexer class
-    named `lexername` (by default, CustomLexer). Users should be very careful with
-    the input, because this method is equivalent to running eval on the input file.
-    The lexer is given the `options` at its instantiation.
-
-    :exc:`ClassNotFound` is raised if there are any errors loading the Lexer
-
-    .. versionadded:: 2.2
-
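For example (a sketch; the file name ``mylexer.py`` and the class name
``MyLexer`` are hypothetical):

.. sourcecode:: python

    from pygments.lexers import load_lexer_from_file

    # loads ./mylexer.py and instantiates its MyLexer class; the file
    # is executed, so only ever load code you trust
    lexer = load_lexer_from_file('mylexer.py', lexername='MyLexer')
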
-.. function:: guess_lexer(text, **options)
-
-    Return a `Lexer` subclass instance that's guessed from the text in
-    `text`. For that, the :meth:`.analyse_text()` method of every known lexer
-    class is called with the text as argument, and the lexer which returned the
-    highest value will be instantiated and returned.
-
-    :exc:`pygments.util.ClassNotFound` is raised if no lexer thinks it can
-    handle the content.
-
-.. function:: guess_lexer_for_filename(filename, text, **options)
-
-    As :func:`guess_lexer()`, but only lexers which have a pattern in `filenames`
-    or `alias_filenames` that matches `filename` are taken into consideration.
-
-    :exc:`pygments.util.ClassNotFound` is raised if no lexer thinks it can
-    handle the content.
-
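A short sketch of the lookup and guessing functions described above:

.. sourcecode:: pycon

    >>> from pygments.lexers import get_lexer_by_name, guess_lexer
    >>> get_lexer_by_name('python')
    <pygments.lexers.PythonLexer>
    >>> guess_lexer('#!/usr/bin/env python\nprint("hi")')
    <pygments.lexers.PythonLexer>
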
-.. function:: get_all_lexers()
-
-    Return an iterable over all registered lexers, yielding tuples in the
-    format::
-
-    	(longname, tuple of aliases, tuple of filename patterns, tuple of mimetypes)
-
-    .. versionadded:: 0.6
-
-.. function:: find_lexer_class_by_name(alias)
-
-    Return the `Lexer` subclass that has `alias` in its aliases list, without
-    instantiating it.
-
-    Will raise :exc:`pygments.util.ClassNotFound` if no lexer with that alias is
-    found.
-
-    .. versionadded:: 2.2
-
-.. function:: find_lexer_class(name)
-
-    Return the `Lexer` subclass whose *name* attribute matches the given
-    *name* argument.
-
-
-.. module:: pygments.formatters
-
-Functions from :mod:`pygments.formatters`:
-
-.. function:: get_formatter_by_name(alias, **options)
-
-    Return an instance of a :class:`.Formatter` subclass that has `alias` in its
-    aliases list. The formatter is given the `options` at its instantiation.
-
-    Will raise :exc:`pygments.util.ClassNotFound` if no formatter with that
-    alias is found.
-
-.. function:: get_formatter_for_filename(fn, **options)
-
-    Return a :class:`.Formatter` subclass instance that has a filename pattern
-    matching `fn`. The formatter is given the `options` at its instantiation.
-
-    Will raise :exc:`pygments.util.ClassNotFound` if no formatter for that filename
-    is found.
-
-.. function:: load_formatter_from_file(filename, formattername="CustomFormatter", **options)
-
-    Return a `Formatter` subclass instance loaded from the provided file, relative
-    to the current directory. The file is expected to contain a Formatter class
-    named ``formattername`` (by default, CustomFormatter). Users should be very
-    careful with the input, because this method is equivalent to running eval
-    on the input file. The formatter is given the `options` at its instantiation.
-
-    :exc:`ClassNotFound` is raised if there are any errors loading the Formatter
-
-    .. versionadded:: 2.2
-
-.. module:: pygments.styles
-
-Functions from :mod:`pygments.styles`:
-
-.. function:: get_style_by_name(name)
-
-    Return a style class by its short name. The names of the builtin styles
-    are listed in :data:`pygments.styles.STYLE_MAP`.
-
-    Will raise :exc:`pygments.util.ClassNotFound` if no style of that name is
-    found.
-
-.. function:: get_all_styles()
-
-    Return an iterable over all registered styles, yielding their names.
-
-    .. versionadded:: 0.6
-
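For instance (a minimal sketch):

.. sourcecode:: pycon

    >>> from pygments.styles import get_style_by_name, get_all_styles
    >>> get_style_by_name('default')
    <class 'pygments.styles.default.DefaultStyle'>
    >>> 'default' in list(get_all_styles())
    True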
-
-.. module:: pygments.lexer
-
-Lexers
-======
-
-The base lexer class from which all lexers are derived is:
-
-.. class:: Lexer(**options)
-
-    The constructor takes a \*\*keywords dictionary of options.
-    Every subclass must first process its own options and then call
-    the `Lexer` constructor, since it processes the `stripnl`,
-    `stripall` and `tabsize` options.
-
-    An example looks like this:
-
-    .. sourcecode:: python
-
-        def __init__(self, **options):
-            self.compress = options.get('compress', '')
-            Lexer.__init__(self, **options)
-
-    As these options must all be specifiable as strings (due to the
-    command line usage), there are various utility functions
-    available to help with that, see `Option processing`_.
-
-    .. method:: get_tokens(text)
-
-        This method is the basic interface of a lexer. It is called by
-        the `highlight()` function. It must process the text and return an
-        iterable of ``(tokentype, value)`` pairs from `text`.
-
-        Normally, you don't need to override this method. The default
-        implementation processes the `stripnl`, `stripall` and `tabsize`
-        options and then yields all tokens from `get_tokens_unprocessed()`,
-        with the ``index`` dropped.
-
-    .. method:: get_tokens_unprocessed(text)
-
-        This method should process the text and return an iterable of
-        ``(index, tokentype, value)`` tuples where ``index`` is the starting
-        position of the token within the input text.
-
-        This method must be overridden by subclasses.
-
-    .. staticmethod:: analyse_text(text)
-
-        A static method which is called for lexer guessing. It should analyse
-        the text and return a float in the range from ``0.0`` to ``1.0``.
-        If it returns ``0.0``, the lexer will not be selected as the most
-        probable one, if it returns ``1.0``, it will be selected immediately.
-
-        .. note:: You don't have to add ``@staticmethod`` to the definition of
-                  this method, this will be taken care of by the Lexer's metaclass.
-
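        As an illustration, a lexer for a hypothetical format whose files
        always start with a magic first line could implement it like this
        (a sketch; ``%MYFORMAT`` is made up):

        .. sourcecode:: python

            def analyse_text(text):
                # a magic first line is a strong signal; otherwise stay neutral
                if text.startswith('%MYFORMAT'):
                    return 0.9
                return 0.0
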
-    For a list of known tokens have a look at the :doc:`tokens` page.
-
-    A lexer can also have the following attributes (in fact, they are mandatory
-    except `alias_filenames`) that are used by the builtin lookup mechanism.
-
-    .. attribute:: name
-
-        Full name for the lexer, in human-readable form.
-
-    .. attribute:: aliases
-
-        A list of short, unique identifiers that can be used to lookup
-        the lexer from a list, e.g. using `get_lexer_by_name()`.
-
-    .. attribute:: filenames
-
-        A list of `fnmatch` patterns that match filenames which contain
-        content for this lexer. The patterns in this list should be unique among
-        all lexers.
-
-    .. attribute:: alias_filenames
-
-        A list of `fnmatch` patterns that match filenames which may or may not
-        contain content for this lexer. This list is used by the
-        :func:`.guess_lexer_for_filename()` function, to determine which lexers
-        are then included in guessing the correct one. That means that
-        e.g. every lexer for HTML and a template language should include
-        ``\*.html`` in this list.
-
-    .. attribute:: mimetypes
-
-        A list of MIME types for content that can be lexed with this
-        lexer.
-
-
-.. module:: pygments.formatter
-
-Formatters
-==========
-
-A formatter is derived from this class:
-
-
-.. class:: Formatter(**options)
-
-    As with lexers, this constructor processes options and then must call the
-    base class :meth:`__init__`.
-
-    The :class:`Formatter` class recognizes the options `style`, `full` and
-    `title`.  It is up to the formatter class whether it uses them.
-
-    .. method:: get_style_defs(arg='')
-
-        This method must return statements or declarations suitable to define
-        the current style for subsequent highlighted text (e.g. CSS classes
-        in the `HTMLFormatter`).
-
-        The optional argument `arg` can be used to modify the generation and
-        is formatter dependent (it is standardized because it can be given on
-        the command line).
-
-        This method is called by the ``-S`` :doc:`command-line option <cmdline>`,
-        the `arg` is then given by the ``-a`` option.
-
-    .. method:: format(tokensource, outfile)
-
-        This method must format the tokens from the `tokensource` iterable and
-        write the formatted version to the file object `outfile`.
-
-        Formatter options can control how exactly the tokens are converted.
-
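        As a usage sketch (the ``HtmlFormatter`` implements both methods):

        .. sourcecode:: pycon

            >>> from pygments.formatters import HtmlFormatter
            >>> fmt = HtmlFormatter(style='colorful')
            >>> css = fmt.get_style_defs('.highlight')  # stylesheet as a string
            >>> '.highlight' in css
            True
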
-    .. versionadded:: 0.7
-       A formatter must have the following attributes that are used by the
-       builtin lookup mechanism.
-
-    .. attribute:: name
-
-        Full name for the formatter, in human-readable form.
-
-    .. attribute:: aliases
-
-        A list of short, unique identifiers that can be used to lookup
-        the formatter from a list, e.g. using :func:`.get_formatter_by_name()`.
-
-    .. attribute:: filenames
-
-        A list of :mod:`fnmatch` patterns that match filenames for which this
-        formatter can produce output. The patterns in this list should be unique
-        among all formatters.
-
-
-.. module:: pygments.util
-
-Option processing
-=================
-
-The :mod:`pygments.util` module has some utility functions usable for option
-processing:
-
-.. exception:: OptionError
-
-    This exception will be raised by all option processing functions if
-    the type or value of the argument is not correct.
-
-.. function:: get_bool_opt(options, optname, default=None)
-
-    Interpret the key `optname` from the dictionary `options` as a boolean and
-    return it. Return `default` if `optname` is not in `options`.
-
-    The valid string values for ``True`` are ``1``, ``yes``, ``true`` and
-    ``on``, the ones for ``False`` are ``0``, ``no``, ``false`` and ``off``
-    (matched case-insensitively).
-
-.. function:: get_int_opt(options, optname, default=None)
-
-    As :func:`get_bool_opt`, but interpret the value as an integer.
-
-.. function:: get_list_opt(options, optname, default=None)
-
-    If the key `optname` from the dictionary `options` is a string,
-    split it at whitespace and return it. If it is already a list
-    or a tuple, it is returned as a list.
-
-.. function:: get_choice_opt(options, optname, allowed, default=None)
-
-    If the key `optname` from the dictionary is not in the sequence
-    `allowed`, raise an error, otherwise return it.
-
-    .. versionadded:: 0.8
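
A short sketch of these helpers in action (option values are strings, as
they would arrive from the command line):

.. sourcecode:: python

    from pygments.util import get_bool_opt, get_int_opt, get_list_opt

    options = {'linenos': 'yes', 'tabsize': '8', 'extras': 'foo bar'}
    get_bool_opt(options, 'linenos', False)   # -> True
    get_int_opt(options, 'tabsize', 4)        # -> 8
    get_list_opt(options, 'extras', [])       # -> ['foo', 'bar']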
diff --git a/doc/_build/html/_sources/docs/authors.rst.txt b/doc/_build/html/_sources/docs/authors.rst.txt
deleted file mode 100644
index f8373f0..0000000
--- a/doc/_build/html/_sources/docs/authors.rst.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-Full contributor list
-=====================
-
-.. include:: ../../AUTHORS
diff --git a/doc/_build/html/_sources/docs/changelog.rst.txt b/doc/_build/html/_sources/docs/changelog.rst.txt
deleted file mode 100644
index f264cab..0000000
--- a/doc/_build/html/_sources/docs/changelog.rst.txt
+++ /dev/null
@@ -1 +0,0 @@
-.. include:: ../../CHANGES
diff --git a/doc/_build/html/_sources/docs/cmdline.rst.txt b/doc/_build/html/_sources/docs/cmdline.rst.txt
deleted file mode 100644
index e4f94ea..0000000
--- a/doc/_build/html/_sources/docs/cmdline.rst.txt
+++ /dev/null
@@ -1,166 +0,0 @@
-.. -*- mode: rst -*-
-
-======================
-Command Line Interface
-======================
-
-You can use Pygments from the shell, provided you installed the
-:program:`pygmentize` script::
-
-    $ pygmentize test.py
-    print "Hello World"
-
-will print the file test.py to standard output, using the Python lexer
-(inferred from the file name extension) and the terminal formatter (because
-you didn't give an explicit formatter name).
-
-If you want HTML output::
-
-    $ pygmentize -f html -l python -o test.html test.py
-
-As you can see, the ``-l`` option explicitly selects a lexer. As seen above, if you
-give an input file name and it has an extension that Pygments recognizes, you can
-omit this option.
-
-The ``-o`` option gives an output file name. If it is not given, output is
-written to stdout.
-
-The ``-f`` option selects a formatter (as with ``-l``, it can also be omitted
-if an output file name is given and has a supported extension).
-If no output file name is given and ``-f`` is omitted, the
-:class:`.TerminalFormatter` is used.
-
-The above command could therefore also be given as::
-
-    $ pygmentize -o test.html test.py
-
-To create a full HTML document, including line numbers and stylesheet (using the
-"emacs" style), highlighting the Python file ``test.py`` to ``test.html``::
-
-    $ pygmentize -O full,style=emacs -o test.html test.py
-
-
-Options and filters
--------------------
-
-Lexer and formatter options can be given using the ``-O`` option::
-
-    $ pygmentize -f html -O style=colorful,linenos=1 -l python test.py
-
-Be sure to enclose the option string in quotes if it contains any special shell
-characters, such as spaces or expansion wildcards like ``*``. If an option
-expects a list value, separate the list entries with spaces (you'll have to
-quote the option value in this case too, so that the shell doesn't split it).
-
-Since the ``-O`` option argument is split at commas and expects the split values
-to be of the form ``name=value``, you can't give an option value that contains
-commas or equals signs.  Therefore, an option ``-P`` is provided (as of Pygments
-0.9) that works like ``-O`` but can only pass one option per ``-P``. Its value
-can then contain all characters::
-
-    $ pygmentize -P "heading=Pygments, the Python highlighter" ...
-
-Filters are added to the token stream using the ``-F`` option::
-
-    $ pygmentize -f html -l pascal -F keywordcase:case=upper main.pas
-
-As you see, options for the filter are given after a colon. As for ``-O``, the
-filter name and options must be one shell word, so there may not be any spaces
-around the colon.
-
-
-Generating styles
------------------
-
-Formatters normally don't output full style information.  For example, the HTML
-formatter by default only outputs ``<span>`` tags with ``class`` attributes.
-Therefore, there's a special ``-S`` option for generating style definitions.
-Usage is as follows::
-
-    $ pygmentize -f html -S colorful -a .syntax
-
-generates a CSS style sheet (because you selected the HTML formatter) for
-the "colorful" style prepending a ".syntax" selector to all style rules.
-
-For an explanation of what ``-a`` means for :doc:`a particular formatter
-<formatters>`, look for the `arg` argument for the formatter's
-:meth:`.get_style_defs()` method.
-
-
-Getting lexer names
--------------------
-
-.. versionadded:: 1.0
-
-The ``-N`` option guesses a lexer name for a given filename, so that ::
-
-    $ pygmentize -N setup.py
-
-will print out ``python``.  It won't highlight anything yet.  If no specific
-lexer is known for that filename, ``text`` is printed.
-
-Custom Lexers and Formatters
-----------------------------
-
-.. versionadded:: 2.2
-
-The ``-x`` flag enables custom lexers and formatters to be loaded
-from files relative to the current directory. Create a file with a class named
-CustomLexer or CustomFormatter, then specify it on the command line::
-
-    $ pygmentize -l your_lexer.py -f your_formatter.py -x
-
-You can also specify the name of your class with a colon::
-
-    $ pygmentize -l your_lexer.py:SomeLexer -x
-
-For more information, see :doc:`the Pygments documentation on Lexer development
-<lexerdevelopment>`.
-
-Getting help
-------------
-
-The ``-L`` option lists lexers, formatters, styles and filters, along with
-their short names and supported file name extensions. If you want to see
-only one category, give it as an argument::
-
-    $ pygmentize -L filters
-
-will list only the installed filters.
-
-The ``-H`` option will give you detailed information (the same that can be found
-in this documentation) about a lexer, formatter or filter. Usage is as follows::
-
-    $ pygmentize -H formatter html
-
-will print the help for the HTML formatter, while ::
-
-    $ pygmentize -H lexer python
-
-will print the help for the Python lexer, etc.
-
-
-A note on encodings
--------------------
-
-.. versionadded:: 0.9
-
-Pygments tries to be smart regarding encodings in the formatting process:
-
-* If you give an ``encoding`` option, it will be used as the input and
-  output encoding.
-
-* If you give an ``outencoding`` option, it will override ``encoding``
-  as the output encoding.
-
-* If you give an ``inencoding`` option, it will override ``encoding``
-  as the input encoding.
-
-* If you don't give an encoding and have given an output file, the default
-  encoding for lexer and formatter is the terminal encoding or the default
-  locale encoding of the system.  As a last resort, ``latin1`` is used (which
-  will pass through all non-ASCII characters).
-
-* If you don't give an encoding and haven't given an output file (that means
-  output is written to the console), the default encoding for lexer and
-  formatter is the terminal encoding (``sys.stdout.encoding``).
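
For example, to read a Latin-1 source file and write UTF-8 HTML output
(a sketch combining the options described above)::

    $ pygmentize -O inencoding=latin1,outencoding=utf-8 -o test.html test.py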
diff --git a/doc/_build/html/_sources/docs/filterdevelopment.rst.txt b/doc/_build/html/_sources/docs/filterdevelopment.rst.txt
deleted file mode 100644
index fbcd0a0..0000000
--- a/doc/_build/html/_sources/docs/filterdevelopment.rst.txt
+++ /dev/null
@@ -1,71 +0,0 @@
-.. -*- mode: rst -*-
-
-=====================
-Write your own filter
-=====================
-
-.. versionadded:: 0.7
-
-Writing your own filters is very easy. All you have to do is subclass
-the `Filter` class and override the `filter` method. Additionally a
-filter is instantiated with some keyword arguments you can use to
-adjust the behavior of your filter.
-
-
-Subclassing Filters
-===================
-
-As an example, we write a filter that converts all `Name.Function` tokens
-to normal `Name` tokens to make the output less colorful.
-
-.. sourcecode:: python
-
-    from pygments.util import get_bool_opt
-    from pygments.token import Name
-    from pygments.filter import Filter
-
-    class UncolorFilter(Filter):
-
-        def __init__(self, **options):
-            Filter.__init__(self, **options)
-            self.class_too = get_bool_opt(options, 'classtoo')
-
-        def filter(self, lexer, stream):
-            for ttype, value in stream:
-                if ttype is Name.Function or (self.class_too and
-                                              ttype is Name.Class):
-                    ttype = Name
-                yield ttype, value
-
-Some notes on the `lexer` argument: it can be quite confusing since it doesn't
-need to be a lexer instance. If a filter was added by using the `add_filter()`
-function of lexers, that lexer is registered for the filter. In that case
-`lexer` will refer to the lexer that has registered the filter. It *can* be used
-to access options passed to a lexer. Because it could be `None` you always have
-to check for that case if you access it.
-
-
-Using a decorator
-=================
-
-You can also use the `simplefilter` decorator from the `pygments.filter` module:
-
-.. sourcecode:: python
-
-    from pygments.util import get_bool_opt
-    from pygments.token import Name
-    from pygments.filter import simplefilter
-
-
-    @simplefilter
-    def uncolor(self, lexer, stream, options):
-        class_too = get_bool_opt(options, 'classtoo')
-        for ttype, value in stream:
-            if ttype is Name.Function or (class_too and
-                                          ttype is Name.Class):
-                ttype = Name
-            yield ttype, value
-
-The decorator automatically subclasses an internal filter class and uses the
-decorated function as a method for filtering.  (That's why there is a `self`
-argument that you probably won't end up using in the method.)
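
The generated class is then used like any handwritten filter: instantiate
it with its options and add it to a lexer (a sketch, reusing the `uncolor`
filter from above):

.. sourcecode:: python

    from pygments.lexers import PythonLexer

    lexer = PythonLexer()
    # `uncolor` is now a Filter subclass; options go to the constructor
    lexer.add_filter(uncolor(classtoo=True))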
diff --git a/doc/_build/html/_sources/docs/filters.rst.txt b/doc/_build/html/_sources/docs/filters.rst.txt
deleted file mode 100644
index ff2519a..0000000
--- a/doc/_build/html/_sources/docs/filters.rst.txt
+++ /dev/null
@@ -1,41 +0,0 @@
-.. -*- mode: rst -*-
-
-=======
-Filters
-=======
-
-.. versionadded:: 0.7
-
-You can filter token streams coming from lexers to improve or annotate the
-output. For example, you can highlight special words in comments, convert
-keywords to upper or lowercase to enforce a style guide etc.
-
-To apply a filter, you can use the `add_filter()` method of a lexer:
-
-.. sourcecode:: pycon
-
-    >>> from pygments.lexers import PythonLexer
-    >>> l = PythonLexer()
-    >>> # add a filter given by a string and options
-    >>> l.add_filter('codetagify', case='lower')
-    >>> l.filters
-    [<pygments.filters.CodeTagFilter object at 0xb785decc>]
-    >>> from pygments.filters import KeywordCaseFilter
-    >>> # or give an instance
-    >>> l.add_filter(KeywordCaseFilter(case='lower'))
-
-The `add_filter()` method takes keyword arguments which are forwarded to
-the constructor of the filter.
-
-To get a list of all registered filters by name, you can use the
-`get_all_filters()` function from the `pygments.filters` module that returns an
-iterable for all known filters.
-
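For instance (a minimal sketch):

.. sourcecode:: pycon

    >>> from pygments.filters import get_all_filters
    >>> 'keywordcase' in list(get_all_filters())
    True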
-If you want to write your own filter, have a look at :doc:`Write your own filter
-<filterdevelopment>`.
-
-
-Builtin Filters
-===============
-
-.. pygmentsdoc:: filters
diff --git a/doc/_build/html/_sources/docs/formatterdevelopment.rst.txt b/doc/_build/html/_sources/docs/formatterdevelopment.rst.txt
deleted file mode 100644
index 2bfac05..0000000
--- a/doc/_build/html/_sources/docs/formatterdevelopment.rst.txt
+++ /dev/null
@@ -1,169 +0,0 @@
-.. -*- mode: rst -*-
-
-========================
-Write your own formatter
-========================
-
-As well as creating :doc:`your own lexer <lexerdevelopment>`, writing a new
-formatter for Pygments is easy and straightforward.
-
-A formatter is a class that is initialized with some keyword arguments (the
-formatter options) and that must provide a `format()` method.
-Additionally a formatter should provide a `get_style_defs()` method that
-returns the style definitions from the style in a form usable for the
-formatter's output format.
-
-
-Quickstart
-==========
-
-The most basic formatter shipped with Pygments is the `NullFormatter`. It just
-sends the value of a token to the output stream:
-
-.. sourcecode:: python
-
-    from pygments.formatter import Formatter
-
-    class NullFormatter(Formatter):
-        def format(self, tokensource, outfile):
-            for ttype, value in tokensource:
-                outfile.write(value)
-
-As you can see, the `format()` method is passed two parameters: `tokensource`
-and `outfile`. The first is an iterable of ``(token_type, value)`` tuples,
-the latter a file-like object with a `write()` method.
-
-Because the formatter is that basic it doesn't override the `get_style_defs()`
-method.
-
-
-Styles
-======
-
-Styles aren't instantiated but their metaclass provides some class functions
-so that you can access the style definitions easily.
-
-Styles are iterable and yield tuples in the form ``(ttype, d)`` where `ttype`
-is a token and `d` is a dict with the following keys:
-
-``'color'``
-    Hexadecimal color value (eg: ``'ff0000'`` for red) or `None` if not
-    defined.
-
-``'bold'``
-    `True` if the value should be bold
-
-``'italic'``
-    `True` if the value should be italic
-
-``'underline'``
-    `True` if the value should be underlined
-
-``'bgcolor'``
-    Hexadecimal color value for the background (eg: ``'eeeeee'`` for light
-    gray) or `None` if not defined.
-
-``'border'``
-    Hexadecimal color value for the border (eg: ``'0000aa'`` for a dark
-    blue) or `None` for no border.
-
-Additional keys might appear in the future; formatters should ignore all keys
-they don't support.
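-
-For example, reading one of these dicts from a builtin style (a sketch; the
-exact color values depend on the chosen style):
-
-.. sourcecode:: pycon
-
-    >>> from pygments.styles import get_style_by_name
-    >>> from pygments.token import Comment
-    >>> style = get_style_by_name('default')
-    >>> dict(style)[Comment]['color']
-    '408080'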
-
-
-HTML 3.2 Formatter
-==================
-
-For a more complex example, let's implement an HTML 3.2 formatter. We don't
-use CSS but inline markup (``<font>``, ``<b>``, etc.). Because this isn't good
-style, this formatter isn't in the standard library ;-)
-
-.. sourcecode:: python
-
-    from pygments.formatter import Formatter
-
-    class OldHtmlFormatter(Formatter):
-
-        def __init__(self, **options):
-            Formatter.__init__(self, **options)
-
-            # create a dict of (start, end) tuples that wrap the
-            # value of a token so that we can use it in the format
-            # method later
-            self.styles = {}
-
-            # we iterate over the `_styles` attribute of a style item
-            # that contains the parsed style values.
-            for token, style in self.style:
-                start = end = ''
-                # a style item is a dict of style attributes;
-                # colors are readily specified in hex: 'RRGGBB'
-                if style['color']:
-                    start += '<font color="#%s">' % style['color']
-                    end = '</font>' + end
-                if style['bold']:
-                    start += '<b>'
-                    end = '</b>' + end
-                if style['italic']:
-                    start += '<i>'
-                    end = '</i>' + end
-                if style['underline']:
-                    start += '<u>'
-                    end = '</u>' + end
-                self.styles[token] = (start, end)
-
-        def format(self, tokensource, outfile):
-            # lastval is a string we use for caching
-            # because it's possible that a lexer yields a number
-            # of consecutive tokens with the same token type.
-            # to minimize the size of the generated html markup we
-            # try to join the values of same-type tokens here
-            lastval = ''
-            lasttype = None
-
-            # wrap the whole output with <pre>
-            outfile.write('<pre>')
-
-            for ttype, value in tokensource:
-                # if the token type doesn't exist in the stylemap
-                # we try it with the parent of the token type
-                # eg: parent of Token.Literal.String.Double is
-                # Token.Literal.String
-                while ttype not in self.styles:
-                    ttype = ttype.parent
-                if ttype == lasttype:
-                    # the current token type is the same as in the last
-                    # iteration; cache the value
-                    lastval += value
-                else:
-                    # not the same token as last iteration, but we
-                    # have some data in the buffer. wrap it with the
-                    # defined style and write it to the output file
-                    if lastval:
-                        stylebegin, styleend = self.styles[lasttype]
-                        outfile.write(stylebegin + lastval + styleend)
-                    # set lastval/lasttype to current values
-                    lastval = value
-                    lasttype = ttype
-
-            # if something is left in the buffer, write it to the
-            # output file, then close the opened <pre> tag
-            if lastval:
-                stylebegin, styleend = self.styles[lasttype]
-                outfile.write(stylebegin + lastval + styleend)
-            outfile.write('</pre>\n')
-
-The comments should explain it. Again, this formatter doesn't override the
-`get_style_defs()` method. If we had used CSS classes instead of
-inline HTML markup, we would need to generate the CSS first. For that
-purpose the `get_style_defs()` method exists:
-
-
-Generating Style Definitions
-============================
-
-Some formatters like the `LatexFormatter` and the `HtmlFormatter` don't
-output inline markup but reference either macros or CSS classes. Because
-the definitions of those are not part of the output, the `get_style_defs()`
-method exists. It is passed one parameter (if it's used and how it's used
-is up to the formatter) and has to return a string or ``None``.
diff --git a/doc/_build/html/_sources/docs/formatters.rst.txt b/doc/_build/html/_sources/docs/formatters.rst.txt
deleted file mode 100644
index 9e7074e..0000000
--- a/doc/_build/html/_sources/docs/formatters.rst.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-.. -*- mode: rst -*-
-
-====================
-Available formatters
-====================
-
-This page lists all builtin formatters.
-
-Common options
-==============
-
-All formatters support these options:
-
-`encoding`
-    If given, must be an encoding name (such as ``"utf-8"``). This will
-    be used to convert the token strings (which are Unicode strings)
-    to byte strings in the output (default: ``None``).
-    It will also be written in an encoding declaration suitable for the
-    document format if the `full` option is given (e.g. a ``meta
-    content-type`` directive in HTML or an invocation of the `inputenc`
-    package in LaTeX).
-
-    If this is ``""`` or ``None``, Unicode strings will be written
-    to the output file, which most file-like objects do not support.
-    For example, `pygments.highlight()` will return a Unicode string if
-    called with no `outfile` argument and a formatter that has `encoding`
-    set to ``None`` because it uses a `StringIO.StringIO` object that
-    supports Unicode arguments to `write()`. Using a regular file object
-    wouldn't work.
-
-    .. versionadded:: 0.6
-
-`outencoding`
-    When using Pygments from the command line, any `encoding` option given is
-    passed to the lexer and the formatter. This is sometimes not desirable,
-    for example if you want to set the input encoding to ``"guess"``.
-    Therefore, `outencoding` has been introduced which overrides `encoding`
-    for the formatter if given.
-
-    .. versionadded:: 0.7
-
-
-Formatter classes
-=================
-
-All these classes are importable from :mod:`pygments.formatters`.
-
-.. pygmentsdoc:: formatters
diff --git a/doc/_build/html/_sources/docs/index.rst.txt b/doc/_build/html/_sources/docs/index.rst.txt
deleted file mode 100644
index 4cf710f..0000000
--- a/doc/_build/html/_sources/docs/index.rst.txt
+++ /dev/null
@@ -1,61 +0,0 @@
-Pygments documentation
-======================
-
-**Starting with Pygments**
-
-.. toctree::
-   :maxdepth: 1
-
-   ../download
-   quickstart
-   cmdline
-
-**Builtin components**
-
-.. toctree::
-   :maxdepth: 1
-
-   lexers
-   filters
-   formatters
-   styles
-
-**Reference**
-
-.. toctree::
-   :maxdepth: 1
-
-   unicode
-   tokens
-   api
-
-**Hacking for Pygments**
-
-.. toctree::
-   :maxdepth: 1
-
-   lexerdevelopment
-   formatterdevelopment
-   filterdevelopment
-   plugins
-
-**Hints and tricks**
-
-.. toctree::
-   :maxdepth: 1
-
-   rstdirective
-   moinmoin
-   java
-   integrate
-
-**About Pygments**
-
-.. toctree::
-   :maxdepth: 1
-
-   changelog
-   authors
-
-If you find bugs or have suggestions for the documentation, please submit them
-on `GitHub <https://github.com/pygments/pygments>`_.
diff --git a/doc/_build/html/_sources/docs/integrate.rst.txt b/doc/_build/html/_sources/docs/integrate.rst.txt deleted file mode 100644 index 77daaa4..0000000 --- a/doc/_build/html/_sources/docs/integrate.rst.txt +++ /dev/null @@ -1,40 +0,0 @@ -.. -*- mode: rst -*- - -=================================== -Using Pygments in various scenarios -=================================== - -Markdown --------- - -Since Pygments 0.9, the distribution ships Markdown_ preprocessor sample code -that uses Pygments to render source code in -:file:`external/markdown-processor.py`. You can copy and adapt it to your -liking. - -.. _Markdown: http://www.freewisdom.org/projects/python-markdown/ - -TextMate --------- - -Antonio Cangiano has created a Pygments bundle for TextMate that allows to -colorize code via a simple menu option. It can be found here_. - -.. _here: http://antoniocangiano.com/2008/10/28/pygments-textmate-bundle/ - -Bash completion ---------------- - -The source distribution contains a file ``external/pygments.bashcomp`` that -sets up completion for the ``pygmentize`` command in bash. - -Wrappers for other languages ----------------------------- - -These libraries provide Pygments highlighting for users of other languages -than Python: - -* `pygments.rb `_, a pygments wrapper for Ruby -* `Clygments `_, a pygments wrapper for - Clojure -* `PHPygments `_, a pygments wrapper for PHP diff --git a/doc/_build/html/_sources/docs/java.rst.txt b/doc/_build/html/_sources/docs/java.rst.txt deleted file mode 100644 index f553463..0000000 --- a/doc/_build/html/_sources/docs/java.rst.txt +++ /dev/null @@ -1,70 +0,0 @@ -===================== -Use Pygments in Java -===================== - -Thanks to `Jython `_ it is possible to use Pygments in -Java. - -This page is a simple tutorial to get an idea of how this works. You can -then look at the `Jython documentation `_ for more -advanced uses. - -Since version 1.5, Pygments is deployed on `Maven Central -`_ as a JAR, as is Jython -which makes it a lot easier to create a Java project. - -Here is an example of a `Maven `_ ``pom.xml`` file for a -project running Pygments: - -.. sourcecode:: xml - - - - - 4.0.0 - example - example - 1.0-SNAPSHOT - - - org.python - jython-standalone - 2.5.3 - - - org.pygments - pygments - 1.5 - runtime - - - - -The following Java example: - -.. sourcecode:: java - - PythonInterpreter interpreter = new PythonInterpreter(); - - // Set a variable with the content you want to work with - interpreter.set("code", code); - - // Simple use Pygments as you would in Python - interpreter.exec("from pygments import highlight\n" - + "from pygments.lexers import PythonLexer\n" - + "from pygments.formatters import HtmlFormatter\n" - + "\nresult = highlight(code, PythonLexer(), HtmlFormatter())"); - - // Get the result that has been set in a variable - System.out.println(interpreter.get("result", String.class)); - -will print something like: - -.. sourcecode:: html - -
-    <div class="highlight">
-    <pre><span class="k">print</span> <span class="s">&quot;Hello World&quot;</span></pre>
-    </div>
diff --git a/doc/_build/html/_sources/docs/lexerdevelopment.rst.txt b/doc/_build/html/_sources/docs/lexerdevelopment.rst.txt deleted file mode 100644 index 5b6813f..0000000 --- a/doc/_build/html/_sources/docs/lexerdevelopment.rst.txt +++ /dev/null @@ -1,728 +0,0 @@ -.. -*- mode: rst -*- - -.. highlight:: python - -==================== -Write your own lexer -==================== - -If a lexer for your favorite language is missing in the Pygments package, you -can easily write your own and extend Pygments. - -All you need can be found inside the :mod:`pygments.lexer` module. As you can -read in the :doc:`API documentation `, a lexer is a class that is -initialized with some keyword arguments (the lexer options) and that provides a -:meth:`.get_tokens_unprocessed()` method which is given a string or unicode -object with the data to lex. - -The :meth:`.get_tokens_unprocessed()` method must return an iterator or iterable -containing tuples in the form ``(index, token, value)``. Normally you don't -need to do this since there are base lexers that do most of the work and that -you can subclass. - - -RegexLexer -========== - -The lexer base class used by almost all of Pygments' lexers is the -:class:`RegexLexer`. This class allows you to define lexing rules in terms of -*regular expressions* for different *states*. - -States are groups of regular expressions that are matched against the input -string at the *current position*. If one of these expressions matches, a -corresponding action is performed (such as yielding a token with a specific -type, or changing state), the current position is set to where the last match -ended and the matching process continues with the first regex of the current -state. - -Lexer states are kept on a stack: each time a new state is entered, the new -state is pushed onto the stack. The most basic lexers (like the `DiffLexer`) -just need one state. - -Each state is defined as a list of tuples in the form (`regex`, `action`, -`new_state`) where the last item is optional. In the most basic form, `action` -is a token type (like `Name.Builtin`). That means: When `regex` matches, emit a -token with the match text and type `tokentype` and push `new_state` on the state -stack. If the new state is ``'#pop'``, the topmost state is popped from the -stack instead. To pop more than one state, use ``'#pop:2'`` and so on. -``'#push'`` is a synonym for pushing the current state on the stack. - -The following example shows the `DiffLexer` from the builtin lexers. Note that -it contains some additional attributes `name`, `aliases` and `filenames` which -aren't required for a lexer. They are used by the builtin lexer lookup -functions. :: - - from pygments.lexer import RegexLexer - from pygments.token import * - - class DiffLexer(RegexLexer): - name = 'Diff' - aliases = ['diff'] - filenames = ['*.diff'] - - tokens = { - 'root': [ - (r' .*\n', Text), - (r'\+.*\n', Generic.Inserted), - (r'-.*\n', Generic.Deleted), - (r'@.*\n', Generic.Subheading), - (r'Index.*\n', Generic.Heading), - (r'=.*\n', Generic.Heading), - (r'.*\n', Text), - ] - } - -As you can see this lexer only uses one state. When the lexer starts scanning -the text, it first checks if the current character is a space. If this is true -it scans everything until newline and returns the data as a `Text` token (which -is the "no special highlighting" token). - -If this rule doesn't match, it checks if the current char is a plus sign. And -so on. 
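-
-To see what this lexer emits, you can feed it a small sample (a sketch;
-token values keep their trailing newlines):
-
-.. code-block:: pycon
-
-    >>> from pygments.lexers import DiffLexer
-    >>> for ttype, value in DiffLexer().get_tokens('+added\n'):
-    ...     print((ttype, value))
-    (Token.Generic.Inserted, '+added\n')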
- -If no rule matches at the current position, the current char is emitted as an -`Error` token that indicates a lexing error, and the position is increased by -one. - - -Adding and testing a new lexer -============================== - -The easiest way to use a new lexer is to use Pygments' support for loading -the lexer from a file relative to your current directory. - -First, change the name of your lexer class to CustomLexer: - -.. code-block:: python - - from pygments.lexer import RegexLexer - from pygments.token import * - - class CustomLexer(RegexLexer): - """All your lexer code goes here!""" - -Then you can load the lexer from the command line with the additional -flag ``-x``: - -.. code-block:: console - - $ pygmentize -l your_lexer_file.py -x - -To specify a class name other than CustomLexer, append it with a colon: - -.. code-block:: console - - $ pygmentize -l your_lexer.py:SomeLexer -x - -Or, using the Python API: - -.. code-block:: python - - # For a lexer named CustomLexer - your_lexer = load_lexer_from_file(filename, **options) - - # For a lexer named MyNewLexer - your_named_lexer = load_lexer_from_file(filename, "MyNewLexer", **options) - -When loading custom lexers and formatters, be extremely careful to use only -trusted files; Pygments will perform the equivalent of ``eval`` on them. - -If you only want to use your lexer with the Pygments API, you can import and -instantiate the lexer yourself, then pass it to :func:`pygments.highlight`. - -To prepare your new lexer for inclusion in the Pygments distribution, so that it -will be found when passing filenames or lexer aliases from the command line, you -have to perform the following steps. - -First, change to the current directory containing the Pygments source code. You -will need to have either an unpacked source tarball, or (preferably) a copy -cloned from GitHub. - -.. code-block:: console - - $ cd .../pygments-main - -Select a matching module under ``pygments/lexers``, or create a new module for -your lexer class. - -Next, make sure the lexer is known from outside of the module. All modules in -the ``pygments.lexers`` package specify ``__all__``. For example, -``esoteric.py`` sets:: - - __all__ = ['BrainfuckLexer', 'BefungeLexer', ...] - -Add the name of your lexer class to this list (or create the list if your lexer -is the only class in the module). - -Finally the lexer can be made publicly known by rebuilding the lexer mapping: - -.. code-block:: console - - $ make mapfiles - -To test the new lexer, store an example file with the proper extension in -``tests/examplefiles``. For example, to test your ``DiffLexer``, add a -``tests/examplefiles/example.diff`` containing a sample diff output. - -Now you can use pygmentize to render your example to HTML: - -.. code-block:: console - - $ ./pygmentize -O full -f html -o /tmp/example.html tests/examplefiles/example.diff - -Note that this explicitly calls the ``pygmentize`` in the current directory -by preceding it with ``./``. This ensures your modifications are used. -Otherwise a possibly already installed, unmodified version without your new -lexer would have been called from the system search path (``$PATH``). - -To view the result, open ``/tmp/example.html`` in your browser. - -Once the example renders as expected, you should run the complete test suite: - -.. code-block:: console - - $ make test - -It also tests that your lexer fulfills the lexer API and certain invariants, -such as that the concatenation of all token text is the same as the input text. 
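-
-As mentioned above, you can also skip registration entirely and drive the
-lexer through the API; a sketch (``your_lexer_file`` is a placeholder module
-name):
-
-.. code-block:: python
-
-    from pygments import highlight
-    from pygments.formatters import HtmlFormatter
-
-    from your_lexer_file import CustomLexer
-
-    print(highlight('some code', CustomLexer(), HtmlFormatter()))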
- - -Regex Flags -=========== - -You can either define regex flags locally in the regex (``r'(?x)foo bar'``) or -globally by adding a `flags` attribute to your lexer class. If no attribute is -defined, it defaults to `re.MULTILINE`. For more information about regular -expression flags see the page about `regular expressions`_ in the Python -documentation. - -.. _regular expressions: http://docs.python.org/library/re.html#regular-expression-syntax - - -Scanning multiple tokens at once -================================ - -So far, the `action` element in the rule tuple of regex, action and state has -been a single token type. Now we look at the first of several other possible -values. - -Here is a more complex lexer that highlights INI files. INI files consist of -sections, comments and ``key = value`` pairs:: - - from pygments.lexer import RegexLexer, bygroups - from pygments.token import * - - class IniLexer(RegexLexer): - name = 'INI' - aliases = ['ini', 'cfg'] - filenames = ['*.ini', '*.cfg'] - - tokens = { - 'root': [ - (r'\s+', Text), - (r';.*?$', Comment), - (r'\[.*?\]$', Keyword), - (r'(.*?)(\s*)(=)(\s*)(.*?)$', - bygroups(Name.Attribute, Text, Operator, Text, String)) - ] - } - -The lexer first looks for whitespace, comments and section names. Later it -looks for a line that looks like a key, value pair, separated by an ``'='`` -sign, and optional whitespace. - -The `bygroups` helper yields each capturing group in the regex with a different -token type. First the `Name.Attribute` token, then a `Text` token for the -optional whitespace, after that a `Operator` token for the equals sign. Then a -`Text` token for the whitespace again. The rest of the line is returned as -`String`. - -Note that for this to work, every part of the match must be inside a capturing -group (a ``(...)``), and there must not be any nested capturing groups. If you -nevertheless need a group, use a non-capturing group defined using this syntax: -``(?:some|words|here)`` (note the ``?:`` after the beginning parenthesis). - -If you find yourself needing a capturing group inside the regex which shouldn't -be part of the output but is used in the regular expressions for backreferencing -(eg: ``r'(<(foo|bar)>)(.*?)()'``), you can pass `None` to the bygroups -function and that group will be skipped in the output. - - -Changing states -=============== - -Many lexers need multiple states to work as expected. For example, some -languages allow multiline comments to be nested. Since this is a recursive -pattern it's impossible to lex just using regular expressions. - -Here is a lexer that recognizes C++ style comments (multi-line with ``/* */`` -and single-line with ``//`` until end of line):: - - from pygments.lexer import RegexLexer - from pygments.token import * - - class CppCommentLexer(RegexLexer): - name = 'Example Lexer with states' - - tokens = { - 'root': [ - (r'[^/]+', Text), - (r'/\*', Comment.Multiline, 'comment'), - (r'//.*?$', Comment.Singleline), - (r'/', Text) - ], - 'comment': [ - (r'[^*/]', Comment.Multiline), - (r'/\*', Comment.Multiline, '#push'), - (r'\*/', Comment.Multiline, '#pop'), - (r'[*/]', Comment.Multiline) - ] - } - -This lexer starts lexing in the ``'root'`` state. It tries to match as much as -possible until it finds a slash (``'/'``). If the next character after the slash -is an asterisk (``'*'``) the `RegexLexer` sends those two characters to the -output stream marked as `Comment.Multiline` and continues lexing with the rules -defined in the ``'comment'`` state. 
- -If there wasn't an asterisk after the slash, the `RegexLexer` checks if it's a -Singleline comment (i.e. followed by a second slash). If this also wasn't the -case it must be a single slash, which is not a comment starter (the separate -regex for a single slash must also be given, else the slash would be marked as -an error token). - -Inside the ``'comment'`` state, we do the same thing again. Scan until the -lexer finds a star or slash. If it's the opening of a multiline comment, push -the ``'comment'`` state on the stack and continue scanning, again in the -``'comment'`` state. Else, check if it's the end of the multiline comment. If -yes, pop one state from the stack. - -Note: If you pop from an empty stack you'll get an `IndexError`. (There is an -easy way to prevent this from happening: don't ``'#pop'`` in the root state). - -If the `RegexLexer` encounters a newline that is flagged as an error token, the -stack is emptied and the lexer continues scanning in the ``'root'`` state. This -can help producing error-tolerant highlighting for erroneous input, e.g. when a -single-line string is not closed. - - -Advanced state tricks -===================== - -There are a few more things you can do with states: - -- You can push multiple states onto the stack if you give a tuple instead of a - simple string as the third item in a rule tuple. For example, if you want to - match a comment containing a directive, something like: - - .. code-block:: text - - /* rest of comment */ - - you can use this rule:: - - tokens = { - 'root': [ - (r'/\* <', Comment, ('comment', 'directive')), - ... - ], - 'directive': [ - (r'[^>]*', Comment.Directive), - (r'>', Comment, '#pop'), - ], - 'comment': [ - (r'[^*]+', Comment), - (r'\*/', Comment, '#pop'), - (r'\*', Comment), - ] - } - - When this encounters the above sample, first ``'comment'`` and ``'directive'`` - are pushed onto the stack, then the lexer continues in the directive state - until it finds the closing ``>``, then it continues in the comment state until - the closing ``*/``. Then, both states are popped from the stack again and - lexing continues in the root state. - - .. versionadded:: 0.9 - The tuple can contain the special ``'#push'`` and ``'#pop'`` (but not - ``'#pop:n'``) directives. - - -- You can include the rules of a state in the definition of another. This is - done by using `include` from `pygments.lexer`:: - - from pygments.lexer import RegexLexer, bygroups, include - from pygments.token import * - - class ExampleLexer(RegexLexer): - tokens = { - 'comments': [ - (r'/\*.*?\*/', Comment), - (r'//.*?\n', Comment), - ], - 'root': [ - include('comments'), - (r'(function )(\w+)( {)', - bygroups(Keyword, Name, Keyword), 'function'), - (r'.', Text), - ], - 'function': [ - (r'[^}/]+', Text), - include('comments'), - (r'/', Text), - (r'\}', Keyword, '#pop'), - ] - } - - This is a hypothetical lexer for a language that consist of functions and - comments. Because comments can occur at toplevel and in functions, we need - rules for comments in both states. As you can see, the `include` helper saves - repeating rules that occur more than once (in this example, the state - ``'comment'`` will never be entered by the lexer, as it's only there to be - included in ``'root'`` and ``'function'``). - -- Sometimes, you may want to "combine" a state from existing ones. This is - possible with the `combined` helper from `pygments.lexer`. 
- - If you, instead of a new state, write ``combined('state1', 'state2')`` as the - third item of a rule tuple, a new anonymous state will be formed from state1 - and state2 and if the rule matches, the lexer will enter this state. - - This is not used very often, but can be helpful in some cases, such as the - `PythonLexer`'s string literal processing. - -- If you want your lexer to start lexing in a different state you can modify the - stack by overriding the `get_tokens_unprocessed()` method:: - - from pygments.lexer import RegexLexer - - class ExampleLexer(RegexLexer): - tokens = {...} - - def get_tokens_unprocessed(self, text, stack=('root', 'otherstate')): - for item in RegexLexer.get_tokens_unprocessed(self, text, stack): - yield item - - Some lexers like the `PhpLexer` use this to make the leading ``', Name.Tag), - ], - 'script-content': [ - (r'(.+?)(<\s*/\s*script\s*>)', - bygroups(using(JavascriptLexer), Name.Tag), - '#pop'), - ] - } - -Here the content of a ```` end tag is processed by the `JavascriptLexer`, -while the end tag is yielded as a normal token with the `Name.Tag` type. - -Also note the ``(r'<\s*script\s*', Name.Tag, ('script-content', 'tag'))`` rule. -Here, two states are pushed onto the state stack, ``'script-content'`` and -``'tag'``. That means that first ``'tag'`` is processed, which will lex -attributes and the closing ``>``, then the ``'tag'`` state is popped and the -next state on top of the stack will be ``'script-content'``. - -Since you cannot refer to the class currently being defined, use `this` -(imported from `pygments.lexer`) to refer to the current lexer class, i.e. -``using(this)``. This construct may seem unnecessary, but this is often the -most obvious way of lexing arbitrary syntax between fixed delimiters without -introducing deeply nested states. - -The `using()` helper has a special keyword argument, `state`, which works as -follows: if given, the lexer to use initially is not in the ``"root"`` state, -but in the state given by this argument. This does not work with advanced -`RegexLexer` subclasses such as `ExtendedRegexLexer` (see below). - -Any other keywords arguments passed to `using()` are added to the keyword -arguments used to create the lexer. - - -Delegating Lexer -================ - -Another approach for nested lexers is the `DelegatingLexer` which is for example -used for the template engine lexers. It takes two lexers as arguments on -initialisation: a `root_lexer` and a `language_lexer`. - -The input is processed as follows: First, the whole text is lexed with the -`language_lexer`. All tokens yielded with the special type of ``Other`` are -then concatenated and given to the `root_lexer`. The language tokens of the -`language_lexer` are then inserted into the `root_lexer`'s token stream at the -appropriate positions. :: - - from pygments.lexer import DelegatingLexer - from pygments.lexers.web import HtmlLexer, PhpLexer - - class HtmlPhpLexer(DelegatingLexer): - def __init__(self, **options): - super(HtmlPhpLexer, self).__init__(HtmlLexer, PhpLexer, **options) - -This procedure ensures that e.g. HTML with template tags in it is highlighted -correctly even if the template tags are put into HTML tags or attributes. 
- -If you want to change the needle token ``Other`` to something else, you can give -the lexer another token type as the third parameter:: - - DelegatingLexer.__init__(MyLexer, OtherLexer, Text, **options) - - -Callbacks -========= - -Sometimes the grammar of a language is so complex that a lexer would be unable -to process it just by using regular expressions and stacks. - -For this, the `RegexLexer` allows callbacks to be given in rule tuples, instead -of token types (`bygroups` and `using` are nothing else but preimplemented -callbacks). The callback must be a function taking two arguments: - -* the lexer itself -* the match object for the last matched rule - -The callback must then return an iterable of (or simply yield) ``(index, -tokentype, value)`` tuples, which are then just passed through by -`get_tokens_unprocessed()`. The ``index`` here is the position of the token in -the input string, ``tokentype`` is the normal token type (like `Name.Builtin`), -and ``value`` the associated part of the input string. - -You can see an example here:: - - from pygments.lexer import RegexLexer - from pygments.token import Generic - - class HypotheticLexer(RegexLexer): - - def headline_callback(lexer, match): - equal_signs = match.group(1) - text = match.group(2) - yield match.start(), Generic.Headline, equal_signs + text + equal_signs - - tokens = { - 'root': [ - (r'(=+)(.*?)(\1)', headline_callback) - ] - } - -If the regex for the `headline_callback` matches, the function is called with -the match object. Note that after the callback is done, processing continues -normally, that is, after the end of the previous match. The callback has no -possibility to influence the position. - -There are not really any simple examples for lexer callbacks, but you can see -them in action e.g. in the `SMLLexer` class in `ml.py`_. - -.. _ml.py: https://github.com/pygments/pygments/blob/master/pygments/lexers/ml.py - - -The ExtendedRegexLexer class -============================ - -The `RegexLexer`, even with callbacks, unfortunately isn't powerful enough for -the funky syntax rules of languages such as Ruby. - -But fear not; even then you don't have to abandon the regular expression -approach: Pygments has a subclass of `RegexLexer`, the `ExtendedRegexLexer`. -All features known from RegexLexers are available here too, and the tokens are -specified in exactly the same way, *except* for one detail: - -The `get_tokens_unprocessed()` method holds its internal state data not as local -variables, but in an instance of the `pygments.lexer.LexerContext` class, and -that instance is passed to callbacks as a third argument. This means that you -can modify the lexer state in callbacks. - -The `LexerContext` class has the following members: - -* `text` -- the input text -* `pos` -- the current starting position that is used for matching regexes -* `stack` -- a list containing the state stack -* `end` -- the maximum position to which regexes are matched, this defaults to - the length of `text` - -Additionally, the `get_tokens_unprocessed()` method can be given a -`LexerContext` instead of a string and will then process this context instead of -creating a new one for the string argument. - -Note that because you can set the current position to anything in the callback, -it won't be automatically be set by the caller after the callback is finished. 
-For example, this is how the hypothetical lexer above would be written with the -`ExtendedRegexLexer`:: - - from pygments.lexer import ExtendedRegexLexer - from pygments.token import Generic - - class ExHypotheticLexer(ExtendedRegexLexer): - - def headline_callback(lexer, match, ctx): - equal_signs = match.group(1) - text = match.group(2) - yield match.start(), Generic.Headline, equal_signs + text + equal_signs - ctx.pos = match.end() - - tokens = { - 'root': [ - (r'(=+)(.*?)(\1)', headline_callback) - ] - } - -This might sound confusing (and it can really be). But it is needed, and for an -example look at the Ruby lexer in `ruby.py`_. - -.. _ruby.py: https://github.com/pygments/pygments/blob/master/pygments/lexers/ruby.py - - -Handling Lists of Keywords -========================== - -For a relatively short list (hundreds) you can construct an optimized regular -expression directly using ``words()`` (longer lists, see next section). This -function handles a few things for you automatically, including escaping -metacharacters and Python's first-match rather than longest-match in -alternations. Feel free to put the lists themselves in -``pygments/lexers/_$lang_builtins.py`` (see examples there), and generated by -code if possible. - -An example of using ``words()`` is something like:: - - from pygments.lexer import RegexLexer, words, Name - - class MyLexer(RegexLexer): - - tokens = { - 'root': [ - (words(('else', 'elseif'), suffix=r'\b'), Name.Builtin), - (r'\w+', Name), - ], - } - -As you can see, you can add ``prefix`` and ``suffix`` parts to the constructed -regex. - - -Modifying Token Streams -======================= - -Some languages ship a lot of builtin functions (for example PHP). The total -amount of those functions differs from system to system because not everybody -has every extension installed. In the case of PHP there are over 3000 builtin -functions. That's an incredibly huge amount of functions, much more than you -want to put into a regular expression. - -But because only `Name` tokens can be function names this is solvable by -overriding the ``get_tokens_unprocessed()`` method. The following lexer -subclasses the `PythonLexer` so that it highlights some additional names as -pseudo keywords:: - - from pygments.lexers.python import PythonLexer - from pygments.token import Name, Keyword - - class MyPythonLexer(PythonLexer): - EXTRA_KEYWORDS = set(('foo', 'bar', 'foobar', 'barfoo', 'spam', 'eggs')) - - def get_tokens_unprocessed(self, text): - for index, token, value in PythonLexer.get_tokens_unprocessed(self, text): - if token is Name and value in self.EXTRA_KEYWORDS: - yield index, Keyword.Pseudo, value - else: - yield index, token, value - -The `PhpLexer` and `LuaLexer` use this method to resolve builtin functions. diff --git a/doc/_build/html/_sources/docs/lexers.rst.txt b/doc/_build/html/_sources/docs/lexers.rst.txt deleted file mode 100644 index ef40f14..0000000 --- a/doc/_build/html/_sources/docs/lexers.rst.txt +++ /dev/null @@ -1,69 +0,0 @@ -.. -*- mode: rst -*- - -================ -Available lexers -================ - -This page lists all available builtin lexers and the options they take. - -Currently, **all lexers** support these options: - -`stripnl` - Strip leading and trailing newlines from the input (default: ``True``) - -`stripall` - Strip all leading and trailing whitespace from the input (default: - ``False``). - -`ensurenl` - Make sure that the input ends with a newline (default: ``True``). This - is required for some lexers that consume input linewise. 
- - .. versionadded:: 1.3 - -`tabsize` - If given and greater than 0, expand tabs in the input (default: ``0``). - -`encoding` - If given, must be an encoding name (such as ``"utf-8"``). This encoding - will be used to convert the input string to Unicode (if it is not already - a Unicode string). The default is ``"guess"``. - - If this option is set to ``"guess"``, a simple UTF-8 vs. Latin-1 - detection is used, if it is set to ``"chardet"``, the - `chardet library `_ is used to - guess the encoding of the input. - - .. versionadded:: 0.6 - - -The "Short Names" field lists the identifiers that can be used with the -`get_lexer_by_name()` function. - -These lexers are builtin and can be imported from `pygments.lexers`: - -.. pygmentsdoc:: lexers - - -Iterating over all lexers -------------------------- - -.. versionadded:: 0.6 - -To get all lexers (both the builtin and the plugin ones), you can -use the `get_all_lexers()` function from the `pygments.lexers` -module: - -.. sourcecode:: pycon - - >>> from pygments.lexers import get_all_lexers - >>> i = get_all_lexers() - >>> i.next() - ('Diff', ('diff',), ('*.diff', '*.patch'), ('text/x-diff', 'text/x-patch')) - >>> i.next() - ('Delphi', ('delphi', 'objectpascal', 'pas', 'pascal'), ('*.pas',), ('text/x-pascal',)) - >>> i.next() - ('XML+Ruby', ('xml+erb', 'xml+ruby'), (), ()) - -As you can see, the return value is an iterator which yields tuples -in the form ``(name, aliases, filetypes, mimetypes)``. diff --git a/doc/_build/html/_sources/docs/moinmoin.rst.txt b/doc/_build/html/_sources/docs/moinmoin.rst.txt deleted file mode 100644 index 8b2216b..0000000 --- a/doc/_build/html/_sources/docs/moinmoin.rst.txt +++ /dev/null @@ -1,39 +0,0 @@ -.. -*- mode: rst -*- - -============================ -Using Pygments with MoinMoin -============================ - -From Pygments 0.7, the source distribution ships a `Moin`_ parser plugin that -can be used to get Pygments highlighting in Moin wiki pages. - -To use it, copy the file `external/moin-parser.py` from the Pygments -distribution to the `data/plugin/parser` subdirectory of your Moin instance. -Edit the options at the top of the file (currently ``ATTACHMENTS`` and -``INLINESTYLES``) and rename the file to the name that the parser directive -should have. For example, if you name the file ``code.py``, you can get a -highlighted Python code sample with this Wiki markup:: - - {{{ - #!code python - [...] - }}} - -where ``python`` is the Pygments name of the lexer to use. - -Additionally, if you set the ``ATTACHMENTS`` option to True, Pygments will also -be called for all attachments for whose filenames there is no other parser -registered. - -You are responsible for including CSS rules that will map the Pygments CSS -classes to colors. You can output a stylesheet file with `pygmentize`, put it -into the `htdocs` directory of your Moin instance and then include it in the -`stylesheets` configuration option in the Moin config, e.g.:: - - stylesheets = [('screen', '/htdocs/pygments.css')] - -If you do not want to do that and are willing to accept larger HTML output, you -can set the ``INLINESTYLES`` option to True. - - -.. 
_Moin: http://moinmoin.wikiwikiweb.de/ diff --git a/doc/_build/html/_sources/docs/plugins.rst.txt b/doc/_build/html/_sources/docs/plugins.rst.txt deleted file mode 100644 index a6f8d7b..0000000 --- a/doc/_build/html/_sources/docs/plugins.rst.txt +++ /dev/null @@ -1,93 +0,0 @@ -================ -Register Plugins -================ - -If you want to extend Pygments without hacking the sources, but want to -use the lexer/formatter/style/filter lookup functions (`lexers.get_lexer_by_name` -et al.), you can use `setuptools`_ entrypoints to add new lexers, formatters -or styles as if they were in the Pygments core. - -.. _setuptools: http://peak.telecommunity.com/DevCenter/setuptools - -That means you can use your highlighter modules with the `pygmentize` script, -which relies on the mentioned functions. - - -Entrypoints -=========== - -Here is a list of setuptools entrypoints that Pygments understands: - -`pygments.lexers` - - This entrypoint is used for adding new lexers to the Pygments core. - The name of the entrypoint values doesn't really matter, Pygments extracts - required metadata from the class definition: - - .. sourcecode:: ini - - [pygments.lexers] - yourlexer = yourmodule:YourLexer - - Note that you have to define ``name``, ``aliases`` and ``filename`` - attributes so that you can use the highlighter from the command line: - - .. sourcecode:: python - - class YourLexer(...): - name = 'Name Of Your Lexer' - aliases = ['alias'] - filenames = ['*.ext'] - - -`pygments.formatters` - - You can use this entrypoint to add new formatters to Pygments. The - name of an entrypoint item is the name of the formatter. If you - prefix the name with a slash it's used as a filename pattern: - - .. sourcecode:: ini - - [pygments.formatters] - yourformatter = yourmodule:YourFormatter - /.ext = yourmodule:YourFormatter - - -`pygments.styles` - - To add a new style you can use this entrypoint. The name of the entrypoint - is the name of the style: - - .. sourcecode:: ini - - [pygments.styles] - yourstyle = yourmodule:YourStyle - - -`pygments.filters` - - Use this entrypoint to register a new filter. The name of the - entrypoint is the name of the filter: - - .. sourcecode:: ini - - [pygments.filters] - yourfilter = yourmodule:YourFilter - - -How To Use Entrypoints -====================== - -This documentation doesn't explain how to use those entrypoints because this is -covered in the `setuptools documentation`_. That page should cover everything -you need to write a plugin. - -.. _setuptools documentation: http://peak.telecommunity.com/DevCenter/setuptools - - -Extending The Core -================== - -If you have written a Pygments plugin that is open source, please inform us -about that. There is a high chance that we'll add it to the Pygments -distribution. diff --git a/doc/_build/html/_sources/docs/quickstart.rst.txt b/doc/_build/html/_sources/docs/quickstart.rst.txt deleted file mode 100644 index 3a823e7..0000000 --- a/doc/_build/html/_sources/docs/quickstart.rst.txt +++ /dev/null @@ -1,205 +0,0 @@ -.. -*- mode: rst -*- - -=========================== -Introduction and Quickstart -=========================== - - -Welcome to Pygments! This document explains the basic concepts and terms and -gives a few examples of how to use the library. 
- - -Architecture -============ - -There are four types of components that work together highlighting a piece of -code: - -* A **lexer** splits the source into tokens, fragments of the source that - have a token type that determines what the text represents semantically - (e.g., keyword, string, or comment). There is a lexer for every language - or markup format that Pygments supports. -* The token stream can be piped through **filters**, which usually modify - the token types or text fragments, e.g. uppercasing all keywords. -* A **formatter** then takes the token stream and writes it to an output - file, in a format such as HTML, LaTeX or RTF. -* While writing the output, a **style** determines how to highlight all the - different token types. It maps them to attributes like "red and bold". - - -Example -======= - -Here is a small example for highlighting Python code: - -.. sourcecode:: python - - from pygments import highlight - from pygments.lexers import PythonLexer - from pygments.formatters import HtmlFormatter - - code = 'print "Hello World"' - print(highlight(code, PythonLexer(), HtmlFormatter())) - -which prints something like this: - -.. sourcecode:: html - -
-    <div class="highlight">
-    <pre><span class="k">print</span> <span class="s">&quot;Hello World&quot;</span></pre>
-    </div>
- -As you can see, Pygments uses CSS classes (by default, but you can change that) -instead of inline styles in order to avoid outputting redundant style information over -and over. A CSS stylesheet that contains all CSS classes possibly used in the output -can be produced by: - -.. sourcecode:: python - - print(HtmlFormatter().get_style_defs('.highlight')) - -The argument to :func:`get_style_defs` is used as an additional CSS selector: -the output may look like this: - -.. sourcecode:: css - - .highlight .k { color: #AA22FF; font-weight: bold } - .highlight .s { color: #BB4444 } - ... - - -Options -======= - -The :func:`highlight()` function supports a fourth argument called *outfile*, it -must be a file object if given. The formatted output will then be written to -this file instead of being returned as a string. - -Lexers and formatters both support options. They are given to them as keyword -arguments either to the class or to the lookup method: - -.. sourcecode:: python - - from pygments import highlight - from pygments.lexers import get_lexer_by_name - from pygments.formatters import HtmlFormatter - - lexer = get_lexer_by_name("python", stripall=True) - formatter = HtmlFormatter(linenos=True, cssclass="source") - result = highlight(code, lexer, formatter) - -This makes the lexer strip all leading and trailing whitespace from the input -(`stripall` option), lets the formatter output line numbers (`linenos` option), -and sets the wrapping ``
``'s class to ``source`` (instead of -``highlight``). - -Important options include: - -`encoding` : for lexers and formatters - Since Pygments uses Unicode strings internally, this determines which - encoding will be used to convert to or from byte strings. -`style` : for formatters - The name of the style to use when writing the output. - - -For an overview of builtin lexers and formatters and their options, visit the -:doc:`lexer ` and :doc:`formatters ` lists. - -For a documentation on filters, see :doc:`this page `. - - -Lexer and formatter lookup -========================== - -If you want to lookup a built-in lexer by its alias or a filename, you can use -one of the following methods: - -.. sourcecode:: pycon - - >>> from pygments.lexers import (get_lexer_by_name, - ... get_lexer_for_filename, get_lexer_for_mimetype) - - >>> get_lexer_by_name('python') - - - >>> get_lexer_for_filename('spam.rb') - - - >>> get_lexer_for_mimetype('text/x-perl') - - -All these functions accept keyword arguments; they will be passed to the lexer -as options. - -A similar API is available for formatters: use :func:`.get_formatter_by_name()` -and :func:`.get_formatter_for_filename()` from the :mod:`pygments.formatters` -module for this purpose. - - -Guessing lexers -=============== - -If you don't know the content of the file, or you want to highlight a file -whose extension is ambiguous, such as ``.html`` (which could contain plain HTML -or some template tags), use these functions: - -.. sourcecode:: pycon - - >>> from pygments.lexers import guess_lexer, guess_lexer_for_filename - - >>> guess_lexer('#!/usr/bin/python\nprint "Hello World!"') - - - >>> guess_lexer_for_filename('test.py', 'print "Hello World!"') - - -:func:`.guess_lexer()` passes the given content to the lexer classes' -:meth:`analyse_text()` method and returns the one for which it returns the -highest number. - -All lexers have two different filename pattern lists: the primary and the -secondary one. The :func:`.get_lexer_for_filename()` function only uses the -primary list, whose entries are supposed to be unique among all lexers. -:func:`.guess_lexer_for_filename()`, however, will first loop through all lexers -and look at the primary and secondary filename patterns if the filename matches. -If only one lexer matches, it is returned, else the guessing mechanism of -:func:`.guess_lexer()` is used with the matching lexers. - -As usual, keyword arguments to these functions are given to the created lexer -as options. - - -Command line usage -================== - -You can use Pygments from the command line, using the :program:`pygmentize` -script:: - - $ pygmentize test.py - -will highlight the Python file test.py using ANSI escape sequences -(a.k.a. terminal colors) and print the result to standard output. - -To output HTML, use the ``-f`` option:: - - $ pygmentize -f html -o test.html test.py - -to write an HTML-highlighted version of test.py to the file test.html. -Note that it will only be a snippet of HTML, if you want a full HTML document, -use the "full" option:: - - $ pygmentize -f html -O full -o test.html test.py - -This will produce a full HTML document with included stylesheet. - -A style can be selected with ``-O style=``. - -If you need a stylesheet for an existing HTML file using Pygments CSS classes, -it can be created with:: - - $ pygmentize -S default -f html > style.css - -where ``default`` is the style name. - -More options and tricks and be found in the :doc:`command line reference -`. 
diff --git a/doc/_build/html/_sources/docs/rstdirective.rst.txt b/doc/_build/html/_sources/docs/rstdirective.rst.txt deleted file mode 100644 index c0d503b..0000000 --- a/doc/_build/html/_sources/docs/rstdirective.rst.txt +++ /dev/null @@ -1,22 +0,0 @@ -.. -*- mode: rst -*- - -================================ -Using Pygments in ReST documents -================================ - -Many Python people use `ReST`_ for documentation their sourcecode, programs, -scripts et cetera. This also means that documentation often includes sourcecode -samples or snippets. - -You can easily enable Pygments support for your ReST texts using a custom -directive -- this is also how this documentation displays source code. - -From Pygments 0.9, the directive is shipped in the distribution as -`external/rst-directive.py`. You can copy and adapt this code to your liking. - -.. removed -- too confusing - *Loosely related note:* The ReST lexer now recognizes ``.. sourcecode::`` and - ``.. code::`` directives and highlights the contents in the specified language - if the `handlecodeblocks` option is true. - -.. _ReST: http://docutils.sf.net/rst.html diff --git a/doc/_build/html/_sources/docs/styles.rst.txt b/doc/_build/html/_sources/docs/styles.rst.txt deleted file mode 100644 index 570293a..0000000 --- a/doc/_build/html/_sources/docs/styles.rst.txt +++ /dev/null @@ -1,232 +0,0 @@ -.. -*- mode: rst -*- - -====== -Styles -====== - -Pygments comes with some builtin styles that work for both the HTML and -LaTeX formatter. - -The builtin styles can be looked up with the `get_style_by_name` function: - -.. sourcecode:: pycon - - >>> from pygments.styles import get_style_by_name - >>> get_style_by_name('colorful') - - -You can pass a instance of a `Style` class to a formatter as the `style` -option in form of a string: - -.. sourcecode:: pycon - - >>> from pygments.styles import get_style_by_name - >>> from pygments.formatters import HtmlFormatter - >>> HtmlFormatter(style='colorful').style - - -Or you can also import your own style (which must be a subclass of -`pygments.style.Style`) and pass it to the formatter: - -.. sourcecode:: pycon - - >>> from yourapp.yourmodule import YourStyle - >>> from pygments.formatters import HtmlFormatter - >>> HtmlFormatter(style=YourStyle).style - - - -Creating Own Styles -=================== - -So, how to create a style? All you have to do is to subclass `Style` and -define some styles: - -.. sourcecode:: python - - from pygments.style import Style - from pygments.token import Keyword, Name, Comment, String, Error, \ - Number, Operator, Generic - - class YourStyle(Style): - default_style = "" - styles = { - Comment: 'italic #888', - Keyword: 'bold #005', - Name: '#f00', - Name.Function: '#0f0', - Name.Class: 'bold #0f0', - String: 'bg:#eee #111' - } - -That's it. There are just a few rules. When you define a style for `Name` -the style automatically also affects `Name.Function` and so on. If you -defined ``'bold'`` and you don't want boldface for a subtoken use ``'nobold'``. - -(Philosophy: the styles aren't written in CSS syntax since this way -they can be used for a variety of formatters.) - -`default_style` is the style inherited by all token types. - -To make the style usable for Pygments, you must - -* either register it as a plugin (see :doc:`the plugin docs `) -* or drop it into the `styles` subpackage of your Pygments distribution one style - class per style, where the file name is the style name and the class name is - `StylenameClass`. 
For example, if your style should be called - ``"mondrian"``, name the class `MondrianStyle`, put it into the file - ``mondrian.py`` and this file into the ``pygments.styles`` subpackage - directory. - - -Style Rules -=========== - -Here a small overview of all allowed styles: - -``bold`` - render text as bold -``nobold`` - don't render text as bold (to prevent subtokens being highlighted bold) -``italic`` - render text italic -``noitalic`` - don't render text as italic -``underline`` - render text underlined -``nounderline`` - don't render text underlined -``bg:`` - transparent background -``bg:#000000`` - background color (black) -``border:`` - no border -``border:#ffffff`` - border color (white) -``#ff0000`` - text color (red) -``noinherit`` - don't inherit styles from supertoken - -Note that there may not be a space between ``bg:`` and the color value -since the style definition string is split at whitespace. -Also, using named colors is not allowed since the supported color names -vary for different formatters. - -Furthermore, not all lexers might support every style. - - -Builtin Styles -============== - -Pygments ships some builtin styles which are maintained by the Pygments team. - -To get a list of known styles you can use this snippet: - -.. sourcecode:: pycon - - >>> from pygments.styles import STYLE_MAP - >>> STYLE_MAP.keys() - ['default', 'emacs', 'friendly', 'colorful'] - - -Getting a list of available styles -================================== - -.. versionadded:: 0.6 - -Because it could be that a plugin registered a style, there is -a way to iterate over all styles: - -.. sourcecode:: pycon - - >>> from pygments.styles import get_all_styles - >>> styles = list(get_all_styles()) - - -.. _AnsiTerminalStyle: - -Terminal Styles -=============== - -.. versionadded:: 2.2 - -Custom styles used with the 256-color terminal formatter can also map colors to -use the 8 default ANSI colors. To do so, use ``ansigreen``, ``ansibrightred`` or -any other colors defined in :attr:`pygments.style.ansicolors`. Foreground ANSI -colors will be mapped to the corresponding `escape codes 30 to 37 -`_ thus respecting any -custom color mapping and themes provided by many terminal emulators. Light -variants are treated as foreground color with and an added bold flag. -``bg:ansi`` will also be respected, except the light variant will be the -same shade as their dark variant. - -See the following example where the color of the string ``"hello world"`` is -governed by the escape sequence ``\x1b[34;01m`` (Ansi bright blue, Bold, 41 being red -background) instead of an extended foreground & background color. - -.. sourcecode:: pycon - - >>> from pygments import highlight - >>> from pygments.style import Style - >>> from pygments.token import Token - >>> from pygments.lexers import Python3Lexer - >>> from pygments.formatters import Terminal256Formatter - - >>> class MyStyle(Style): - styles = { - Token.String: 'ansibrightblue bg:ansibrightred', - } - - >>> code = 'print("Hello World")' - >>> result = highlight(code, Python3Lexer(), Terminal256Formatter(style=MyStyle)) - >>> print(result.encode()) - b'\x1b[34;41;01m"\x1b[39;49;00m\x1b[34;41;01mHello World\x1b[39;49;00m\x1b[34;41;01m"\x1b[39;49;00m' - -Colors specified using ``ansi*`` are converted to a default set of RGB colors -when used with formatters other than the terminal-256 formatter. 
- -By definition of ANSI, the following colors are considered "light" colors, and -will be rendered by most terminals as bold: - -- "brightblack" (darkgrey), "brightred", "brightgreen", "brightyellow", "brightblue", - "brightmagenta", "brightcyan", "white" - -The following are considered "dark" colors and will be rendered as non-bold: - -- "black", "red", "green", "yellow", "blue", "magenta", "cyan", - "gray" - -Exact behavior might depends on the terminal emulator you are using, and its -settings. - -.. _new-ansi-color-names: - -.. versionchanged:: 2.4 - -The definition of the ANSI color names has changed. -New names are easier to understand and align to the colors used in other projects. - -===================== ==================== -New names Pygments up to 2.3 -===================== ==================== -``ansiblack`` ``#ansiblack`` -``ansired`` ``#ansidarkred`` -``ansigreen`` ``#ansidarkgreen`` -``ansiyellow`` ``#ansibrown`` -``ansiblue`` ``#ansidarkblue`` -``ansimagenta`` ``#ansipurple`` -``ansicyan`` ``#ansiteal`` -``ansigray`` ``#ansilightgray`` -``ansibrightblack`` ``#ansidarkgray`` -``ansibrightred`` ``#ansired`` -``ansibrightgreen`` ``#ansigreen`` -``ansibrightyellow`` ``#ansiyellow`` -``ansibrightblue`` ``#ansiblue`` -``ansibrightmagenta`` ``#ansifuchsia`` -``ansibrightcyan`` ``#ansiturquoise`` -``ansiwhite`` ``#ansiwhite`` -===================== ==================== - -Old ANSI color names are deprecated but will still work. diff --git a/doc/_build/html/_sources/docs/tokens.rst.txt b/doc/_build/html/_sources/docs/tokens.rst.txt deleted file mode 100644 index 801fc63..0000000 --- a/doc/_build/html/_sources/docs/tokens.rst.txt +++ /dev/null @@ -1,372 +0,0 @@ -.. -*- mode: rst -*- - -============== -Builtin Tokens -============== - -.. module:: pygments.token - -In the :mod:`pygments.token` module, there is a special object called `Token` -that is used to create token types. - -You can create a new token type by accessing an attribute of `Token`: - -.. sourcecode:: pycon - - >>> from pygments.token import Token - >>> Token.String - Token.String - >>> Token.String is Token.String - True - -Note that tokens are singletons so you can use the ``is`` operator for comparing -token types. - -As of Pygments 0.7 you can also use the ``in`` operator to perform set tests: - -.. sourcecode:: pycon - - >>> from pygments.token import Comment - >>> Comment.Single in Comment - True - >>> Comment in Comment.Multi - False - -This can be useful in :doc:`filters ` and if you write lexers on your -own without using the base lexers. - -You can also split a token type into a hierarchy, and get the parent of it: - -.. sourcecode:: pycon - - >>> String.split() - [Token, Token.Literal, Token.Literal.String] - >>> String.parent - Token.Literal - -In principle, you can create an unlimited number of token types but nobody can -guarantee that a style would define style rules for a token type. Because of -that, Pygments proposes some global token types defined in the -`pygments.token.STANDARD_TYPES` dict. - -For some tokens aliases are already defined: - -.. 
sourcecode:: pycon - - >>> from pygments.token import String - >>> String - Token.Literal.String - -Inside the :mod:`pygments.token` module the following aliases are defined: - -============= ============================ ==================================== -`Text` `Token.Text` for any type of text data -`Whitespace` `Token.Text.Whitespace` for specially highlighted whitespace -`Error` `Token.Error` represents lexer errors -`Other` `Token.Other` special token for data not - matched by a parser (e.g. HTML - markup in PHP code) -`Keyword` `Token.Keyword` any kind of keywords -`Name` `Token.Name` variable/function names -`Literal` `Token.Literal` Any literals -`String` `Token.Literal.String` string literals -`Number` `Token.Literal.Number` number literals -`Operator` `Token.Operator` operators (``+``, ``not``...) -`Punctuation` `Token.Punctuation` punctuation (``[``, ``(``...) -`Comment` `Token.Comment` any kind of comments -`Generic` `Token.Generic` generic tokens (have a look at - the explanation below) -============= ============================ ==================================== - -The `Whitespace` token type is new in Pygments 0.8. It is used only by the -`VisibleWhitespaceFilter` currently. - -Normally you just create token types using the already defined aliases. For each -of those token aliases, a number of subtypes exists (excluding the special tokens -`Token.Text`, `Token.Error` and `Token.Other`) - -The `is_token_subtype()` function in the `pygments.token` module can be used to -test if a token type is a subtype of another (such as `Name.Tag` and `Name`). -(This is the same as ``Name.Tag in Name``. The overloaded `in` operator was newly -introduced in Pygments 0.7, the function still exists for backwards -compatibility.) - -With Pygments 0.7, it's also possible to convert strings to token types (for example -if you want to supply a token from the command line): - -.. sourcecode:: pycon - - >>> from pygments.token import String, string_to_tokentype - >>> string_to_tokentype("String") - Token.Literal.String - >>> string_to_tokentype("Token.Literal.String") - Token.Literal.String - >>> string_to_tokentype(String) - Token.Literal.String - - -Keyword Tokens -============== - -`Keyword` - For any kind of keyword (especially if it doesn't match any of the - subtypes of course). - -`Keyword.Constant` - For keywords that are constants (e.g. ``None`` in future Python versions). - -`Keyword.Declaration` - For keywords used for variable declaration (e.g. ``var`` in some programming - languages like JavaScript). - -`Keyword.Namespace` - For keywords used for namespace declarations (e.g. ``import`` in Python and - Java and ``package`` in Java). - -`Keyword.Pseudo` - For keywords that aren't really keywords (e.g. ``None`` in old Python - versions). - -`Keyword.Reserved` - For reserved keywords. - -`Keyword.Type` - For builtin types that can't be used as identifiers (e.g. ``int``, - ``char`` etc. in C). - - -Name Tokens -=========== - -`Name` - For any name (variable names, function names, classes). - -`Name.Attribute` - For all attributes (e.g. in HTML tags). - -`Name.Builtin` - Builtin names; names that are available in the global namespace. - -`Name.Builtin.Pseudo` - Builtin names that are implicit (e.g. ``self`` in Ruby, ``this`` in Java). - -`Name.Class` - Class names. Because no lexer can know if a name is a class or a function - or something else this token is meant for class declarations. - -`Name.Constant` - Token type for constants. 
Keyword Tokens -============== - -`Keyword` - For any kind of keyword (especially if it doesn't match any of the - subtypes of course). - -`Keyword.Constant` - For keywords that are constants (e.g. ``None`` in future Python versions). - -`Keyword.Declaration` - For keywords used for variable declaration (e.g. ``var`` in some programming - languages like JavaScript). - -`Keyword.Namespace` - For keywords used for namespace declarations (e.g. ``import`` in Python and - Java and ``package`` in Java). - -`Keyword.Pseudo` - For keywords that aren't really keywords (e.g. ``None`` in old Python - versions). - -`Keyword.Reserved` - For reserved keywords. - -`Keyword.Type` - For builtin types that can't be used as identifiers (e.g. ``int``, - ``char`` etc. in C). - - -Name Tokens -=========== - -`Name` - For any name (variable names, function names, classes). - -`Name.Attribute` - For all attributes (e.g. in HTML tags). - -`Name.Builtin` - Builtin names; names that are available in the global namespace. - -`Name.Builtin.Pseudo` - Builtin names that are implicit (e.g. ``self`` in Ruby, ``this`` in Java). - -`Name.Class` - Class names. Because no lexer can know if a name is a class or a function - or something else, this token is meant for class declarations. - -`Name.Constant` - Token type for constants. In some languages you can recognise a token by the - way it's defined (the value after a ``const`` keyword for example). In - other languages constants are uppercase by definition (Ruby). - -`Name.Decorator` - Token type for decorators. Decorators are syntactic elements in the Python - language. Similar syntax elements exist in C# and Java. - -`Name.Entity` - Token type for special entities (e.g. ``&nbsp;`` in HTML). - -`Name.Exception` - Token type for exception names (e.g. ``RuntimeError`` in Python). Some languages - define exceptions in the function signature (Java). You can then highlight - the name of that exception using this token. - -`Name.Function` - Token type for function names. - -`Name.Function.Magic` - Same as `Name.Function` but for special function names that have an implicit use - in a language (e.g. the ``__init__`` method in Python). - -`Name.Label` - Token type for label names (e.g. in languages that support ``goto``). - -`Name.Namespace` - Token type for namespaces (e.g. import paths in Java/Python), or names following - the ``module``/``namespace`` keyword in other languages. - -`Name.Other` - Other names. Normally unused. - -`Name.Tag` - Tag names (in HTML/XML markup or configuration files). - -`Name.Variable` - Token type for variables. Some languages have prefixes for variable names - (PHP, Ruby, Perl). You can highlight them using this token. - -`Name.Variable.Class` - Same as `Name.Variable` but for class variables (also static variables). - -`Name.Variable.Global` - Same as `Name.Variable` but for global variables (used in Ruby, for - example). - -`Name.Variable.Instance` - Same as `Name.Variable` but for instance variables. - -`Name.Variable.Magic` - Same as `Name.Variable` but for special variable names that have an implicit use - in a language (e.g. ``__doc__`` in Python). - - -Literals -======== - -`Literal` - For any literal (if not further defined). - -`Literal.Date` - For date literals (e.g. ``42d`` in Boo). - - -`String` - For any string literal. - -`String.Affix` - Token type for affixes that further specify the type of the string they're - attached to (e.g. the prefixes ``r`` and ``u8`` in ``r"foo"`` and ``u8"foo"``). - -`String.Backtick` - Token type for strings enclosed in backticks. - -`String.Char` - Token type for single characters (e.g. Java, C). - -`String.Delimiter` - Token type for delimiting identifiers in "heredoc", raw and other similar - strings (e.g. the word ``END`` in Perl code ``print <<'END';``). - -`String.Doc` - Token type for documentation strings (for example Python). - -`String.Double` - Double quoted strings. - -`String.Escape` - Token type for escape sequences in strings. - -`String.Heredoc` - Token type for "heredoc" strings (e.g. in Ruby or Perl). - -`String.Interpol` - Token type for interpolated parts in strings (e.g. ``#{foo}`` in Ruby). - -`String.Other` - Token type for any other strings (for example ``%q{foo}`` string constructs - in Ruby). - -`String.Regex` - Token type for regular expression literals (e.g. ``/foo/`` in JavaScript). - -`String.Single` - Token type for single quoted strings. - -`String.Symbol` - Token type for symbols (e.g. ``:foo`` in LISP or Ruby). - - -`Number` - Token type for any number literal. - -`Number.Bin` - Token type for binary literals (e.g. ``0b101010``). - -`Number.Float` - Token type for float literals (e.g. ``42.0``). - -`Number.Hex` - Token type for hexadecimal number literals (e.g. ``0xdeadbeef``). - -`Number.Integer` - Token type for integer literals (e.g. ``42``).
- -`Number.Integer.Long` - Token type for long integer literals (e.g. ``42L`` in Python). - -`Number.Oct` - Token type for octal literals. - - -Operators -========= - -`Operator` - For any punctuation operator (e.g. ``+``, ``-``). - -`Operator.Word` - For any operator that is a word (e.g. ``not``). - - -Punctuation -=========== - -.. versionadded:: 0.7 - -`Punctuation` - For any punctuation which is not an operator (e.g. ``[``, ``(``...) - - -Comments -======== - -`Comment` - Token type for any comment. - -`Comment.Hashbang` - Token type for hashbang comments (i.e. first lines of files that start with - ``#!``). - -`Comment.Multiline` - Token type for multiline comments. - -`Comment.Preproc` - Token type for preprocessor comments (also ``<?php``/``<%`` constructs). - -.. versionadded:: 0.7 - The formatters now also accept an `outencoding` option which will override - the `encoding` option if given. This makes it possible to use a single - options dict with lexers and formatters, and still have different input and - output encodings. - -.. _chardet: https://chardet.github.io/ diff --git a/doc/_build/html/_sources/download.rst.txt b/doc/_build/html/_sources/download.rst.txt deleted file mode 100644 index 975c41b..0000000 --- a/doc/_build/html/_sources/download.rst.txt +++ /dev/null @@ -1,39 +0,0 @@ -Download and installation -========================= - -The current release is version |version|. - -Packaged versions ----------------- - -You can download it `from the Python Package Index -`_. For installation of packages from -PyPI, we recommend `Pip `_, which works on all -major platforms. - -Under Linux, most distributions include a package for Pygments, usually called -``pygments`` or ``python-pygments``. You can install it with the package -manager as usual. - -Development sources ------------------- - -We're using the Git version control system. You can get the development source -using this command:: - - git clone https://github.com/pygments/pygments - -Development takes place at `GitHub `_. - -The latest changes in the development source code are listed in the `changelog -`_. - -.. Documentation - ------------- - -.. XXX todo - - You can download the documentation either as - a bunch of rst files from the Git repository, see above, or - as a tar.gz containing rendered HTML files:

-

pygmentsdocs.tar.gz
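Once installed, a quick smoke test from the Python prompt exercises the whole pipeline (a minimal sketch; the exact markup emitted depends on the formatter options and the installed version, and the output line is abbreviated here):

.. sourcecode:: pycon

    >>> from pygments import highlight
    >>> from pygments.lexers import PythonLexer
    >>> from pygments.formatters import HtmlFormatter
    >>> print(highlight('print("hello")', PythonLexer(), HtmlFormatter()))
    <div class="highlight"><pre>...</pre></div>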

diff --git a/doc/_build/html/_sources/faq.rst.txt b/doc/_build/html/_sources/faq.rst.txt deleted file mode 100644 index 108cef4..0000000 --- a/doc/_build/html/_sources/faq.rst.txt +++ /dev/null @@ -1,140 +0,0 @@ -:orphan: - -Pygments FAQ -============= - -What is Pygments? ----------------- - -Pygments is a syntax highlighting engine written in Python. That means it will -take source code (or other markup) in a supported language and output a -processed version (in different formats) containing syntax highlighting markup. - -Its features include: - -* a wide range of common :doc:`languages and markup formats ` is supported -* new languages and formats are added easily -* a number of output formats is available, including: - - - HTML - - ANSI sequences (console output) - - LaTeX - - RTF - -* it is usable as a command-line tool and as a library -* parsing and formatting is fast - -Pygments is licensed under the BSD license. - -Where does the name Pygments come from? --------------------------------------- - -*Py* of course stands for Python, while *pigments* are used for coloring paint, -and in this case, source code! - -What are the system requirements? --------------------------------- - -Pygments only needs a standard Python install: version 2.7 or higher, or version -3.5 or higher for Python 3. No additional libraries are needed. - -How can I use Pygments? ----------------------- - -Pygments is usable as a command-line tool as well as a library. - -From the command line, usage looks like this (assuming the pygmentize script is -properly installed):: - - pygmentize -f html /path/to/file.py - -This will print an HTML-highlighted version of /path/to/file.py to standard output. - -For complete help, run ``pygmentize -h``. - -Usage as a library is thoroughly demonstrated in the Documentation section. - -How do I make a new style? -------------------------- - -Please see the :doc:`documentation on styles `. - -How can I report a bug or suggest a feature? ------------------------------------------- - -Please report bugs and feature wishes in the tracker at GitHub. - -You can also e-mail the authors; see the contact details. - -I want support for this language! --------------------------------- - -Instead of waiting for others to include language support, why not write it -yourself? Everything you need to know is :doc:`outlined in the docs -`. - -Can I use Pygments for programming language processing? ------------------------------------------------------- - -The Pygments lexing machinery is quite powerful and can be used to build lexers for -basically all languages. However, parsing them is not possible, though some -lexers go some steps in this direction in order to, e.g., highlight function names -differently. - -Also, error reporting is not within the scope of Pygments. It focuses on correctly -highlighting syntactically valid documents, not on finding and compensating for errors.
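As a sketch of what such processing with the lexing machinery can look like, the following pulls identifier names out of a snippet (the snippet and the filtering logic are illustrative; only ``get_tokens`` and the token types are Pygments API):

.. sourcecode:: pycon

    >>> from pygments.lexers import PythonLexer
    >>> from pygments.token import Name
    >>> code = 'def greet(name): return name'
    >>> [v for ttype, v in PythonLexer().get_tokens(code) if ttype in Name]
    ['greet', 'name', 'name']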
Who uses Pygments? ------------------ - -This is an (incomplete) list of projects and sites known to use the Pygments highlighter. - -* `Wikipedia `_ -* `BitBucket `_, a Mercurial and Git hosting site -* `The Sphinx documentation builder `_, for embedded source examples -* `rst2pdf `_, a reStructuredText to PDF converter -* `Codecov `_, a code coverage CI service -* `Trac `_, the universal project management tool -* `AsciiDoc `_, a text-based documentation generator -* `ActiveState Code `_, the Python Cookbook successor -* `ViewVC `_, a web-based version control repository browser -* `BzrFruit `_, a Bazaar branch viewer -* `QBzr `_, a cross-platform Qt-based GUI front end for Bazaar -* `Review Board `_, a collaborative code reviewing tool -* `Diamanda `_, a Django-powered wiki system with support for Pygments -* `Progopedia `_ (`English `_), - an encyclopedia of programming languages -* `Bruce `_, a reStructuredText presentation tool -* `PIDA `_, a universal IDE written in Python -* `BPython `_, a curses-based intelligent Python shell -* `PuDB `_, a console Python debugger -* `XWiki `_, a wiki-based development framework in Java, using Jython -* `roux `_, a script for running R scripts - and creating beautiful output including graphs -* `hurl `_, a web service for making HTTP requests -* `wxHTMLPygmentizer `_ is - a GUI utility, used to make code-colorization easier -* `Postmarkup `_, a BBCode to XHTML generator -* `WpPygments `_, and `WPygments - `_, highlighter plugins for WordPress -* `Siafoo `_, a tool for sharing and storing useful code and programming experience -* `D source `_, a community for the D programming language -* `dpaste.com `_, another Django pastebin -* `Django snippets `_, a pastebin for Django code -* `Fayaa `_, a Chinese pastebin -* `Incollo.com `_, a free collaborative debugging tool -* `PasteBox `_, a pastebin focused on privacy -* `hilite.me `_, a site to highlight code snippets -* `patx.me `_, a pastebin -* `Fluidic `_, an experiment in - integrating shells with a GUI -* `pygments.rb `_, a pygments wrapper for Ruby -* `Clygments `_, a pygments wrapper for - Clojure -* `PHPygments `_, a pygments wrapper for PHP -* `Spyder `_, the Scientific Python Development - Environment, uses pygments for the multi-language syntax highlighting in its - `editor `_. - -If you have a project or web site using Pygments, drop me a line, and I'll add a -link here. diff --git a/doc/_build/html/_sources/index.rst.txt b/doc/_build/html/_sources/index.rst.txt deleted file mode 100644 index d89277e..0000000 --- a/doc/_build/html/_sources/index.rst.txt +++ /dev/null @@ -1,49 +0,0 @@ -Welcome! -======== - -This is the home of Pygments. It is a generic syntax highlighter suitable for -use in code hosting, forums, wikis or other applications that need to prettify -source code. Highlights are: - -* a wide range of over 300 languages and other text formats is supported -* special attention is paid to details that increase highlighting quality -* support for new languages and formats is added easily; most languages use a - simple regex-based lexing mechanism -* a number of output formats is available, among them HTML, RTF, LaTeX and ANSI - sequences -* it is usable as a command-line tool and as a library -* ... and it highlights even Perl 6! - -Read more in the :doc:`FAQ list ` or the :doc:`documentation `, -or `download the latest release `_. - -.. _contribute: - -Contribute ---------- - -Like every open-source project, we are always looking for volunteers to help us -with programming. Python knowledge is required, but don't fear: Python is a very -clear and easy-to-learn language.
- -Development takes place on `GitHub `_. - -If you found a bug, just open a ticket in the GitHub tracker. Be sure to log -in to be notified when the issue is fixed -- development is not fast-paced as -the library is quite stable. You can also send an e-mail to the developers, see -below. - -The authors ------------ - -Pygments is maintained by **Georg Brandl**, e-mail address *georg*\ *@*\ *python.org* -and **Matthäus Chajdas**. - -Many lexers and fixes have been contributed by **Armin Ronacher**, the rest of -the `Pocoo `_ team and **Tim Hatch**. - -.. toctree:: - :maxdepth: 1 - :hidden: - - docs/index diff --git a/doc/_build/html/_sources/languages.rst.txt b/doc/_build/html/_sources/languages.rst.txt deleted file mode 100644 index a91664c..0000000 --- a/doc/_build/html/_sources/languages.rst.txt +++ /dev/null @@ -1,176 +0,0 @@ -:orphan: - -Supported languages -=================== - -Pygments supports an ever-growing range of languages. Watch this space... - -Programming languages ---------------------- - -* ActionScript -* Ada -* ANTLR -* AppleScript -* Assembly (various) -* Asymptote -* `Augeas `_ -* Awk -* BBC Basic -* Befunge -* `Boa `_ -* Boo -* BrainFuck -* C, C++ -* C# -* `Charm++ CI `_ -* Clojure -* CoffeeScript -* ColdFusion -* Common Lisp -* Coq -* Cryptol (incl. Literate Cryptol) -* `Crystal `_ -* `Cython `_ -* `D `_ -* Dart -* DCPU-16 -* Delphi -* Dylan -* `Elm `_ -* Email -* Erlang -* `Ezhil `_ Ezhil - A Tamil programming language -* Factor -* Fancy -* `Fennel `_ -* `FloScript `_ -* Fortran -* `FreeFEM++ `_ -* F# -* GAP -* Gherkin (Cucumber) -* GL shaders -* Groovy -* `Haskell `_ (incl. Literate Haskell) -* HLSL -* `HSpec `_ -* IDL -* Io -* Java -* JavaScript -* Lasso -* LLVM -* Logtalk -* `Lua `_ -* Matlab -* MiniD -* Modelica -* Modula-2 -* MuPad -* Nemerle -* Nimrod -* Notmuch -* Objective-C -* Objective-J -* Octave -* OCaml -* PHP -* `Perl 5 `_ and `Perl 6 `_ -* `Pony `_ -* PovRay -* PostScript -* PowerShell -* Prolog -* `Python `_ 2.x and 3.x (incl. console sessions and tracebacks) -* `REBOL `_ -* `Red `_ -* Redcode -* `Ruby `_ (incl. irb sessions) -* Rust -* S, S-Plus, R -* Scala -* `Scdoc `_ -* Scheme -* Scilab -* `SGF `_ -* `Slash `_ -* `Slurm `_ -* Smalltalk -* SNOBOL -* `Solidity `_ -* Tcl -* `Tera Term language `_ -* `TOML `_ -* Vala -* Verilog -* VHDL -* Visual Basic.NET -* Visual FoxPro -* XQuery -* `Zeek `_ -* Zephir -* `Zig `_ - -Template languages ------------------- - -* Cheetah templates -* `Django `_ / `Jinja - `_ templates -* ERB (Ruby templating) -* `Genshi `_ (the Trac template language) -* JSP (Java Server Pages) -* `Myghty `_ (the HTML::Mason based framework) -* `Mako `_ (the Myghty successor) -* `Smarty `_ templates (PHP templating) -* Tea - -Other markup ------------- - -* Apache config files -* Bash shell scripts -* BBCode -* CMake -* CSS -* Debian control files -* Diff files -* DTD -* Gettext catalogs -* Gnuplot script -* Groff markup -* HTML -* HTTP sessions -* INI-style config files -* IRC logs (irssi style) -* Lighttpd config files -* Makefiles -* MoinMoin/Trac Wiki markup -* MySQL -* Nginx config files -* POV-Ray scenes -* Ragel -* Redcode -* ReST -* Robot Framework -* RPM spec files -* SQL, also MySQL, SQLite -* Squid configuration -* TeX -* tcsh -* Vim Script -* Windows batch files -* XML -* XSLT -* YAML - -... that's all? ---------------- - -Well, why not write your own? Contributing to Pygments is easy and fun. Take a -look at the :doc:`docs on lexer development `. Pull -requests are welcome on `GitHub `. 
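To give a flavour of what lexer development involves, here is a minimal, hypothetical sketch (the tiny ``EchoLexer`` and its one-keyword language are invented for illustration; ``RegexLexer`` and the token imports are the real Pygments API):

.. sourcecode:: python

    from pygments.lexer import RegexLexer
    from pygments.token import Comment, Keyword, Name, Text

    class EchoLexer(RegexLexer):
        """Hypothetical lexer for a made-up one-keyword language."""
        name = 'Echo'
        aliases = ['echo']
        filenames = ['*.echo']

        tokens = {
            'root': [
                (r'#.*$', Comment.Single),    # line comments
                (r'\becho\b', Keyword),       # the single keyword
                (r'[A-Za-z_]\w*', Name),      # identifiers
                (r'\s+', Text),               # whitespace
            ],
        }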
- -Note: the languages listed here are supported in the development version. The -latest release may lack a few of them. diff --git a/doc/_build/html/_static/basic.css b/doc/_build/html/_static/basic.css deleted file mode 100644 index ea6972d..0000000 --- a/doc/_build/html/_static/basic.css +++ /dev/null @@ -1,764 +0,0 @@ -/* - * basic.css - * ~~~~~~~~~ - * - * Sphinx stylesheet -- basic theme. - * - * :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. - * - */ - -/* -- main layout ----------------------------------------------------------- */ - -div.clearer { - clear: both; -} - -/* -- relbar ---------------------------------------------------------------- */ - -div.related { - width: 100%; - font-size: 90%; -} - -div.related h3 { - display: none; -} - -div.related ul { - margin: 0; - padding: 0 0 0 10px; - list-style: none; -} - -div.related li { - display: inline; -} - -div.related li.right { - float: right; - margin-right: 5px; -} - -/* -- sidebar --------------------------------------------------------------- */ - -div.sphinxsidebarwrapper { - padding: 10px 5px 0 10px; -} - -div.sphinxsidebar { - float: left; - width: 230px; - margin-left: -100%; - font-size: 90%; - word-wrap: break-word; - overflow-wrap : break-word; -} - -div.sphinxsidebar ul { - list-style: none; -} - -div.sphinxsidebar ul ul, -div.sphinxsidebar ul.want-points { - margin-left: 20px; - list-style: square; -} - -div.sphinxsidebar ul ul { - margin-top: 0; - margin-bottom: 0; -} - -div.sphinxsidebar form { - margin-top: 10px; -} - -div.sphinxsidebar input { - border: 1px solid #98dbcc; - font-family: sans-serif; - font-size: 1em; -} - -div.sphinxsidebar #searchbox form.search { - overflow: hidden; -} - -div.sphinxsidebar #searchbox input[type="text"] { - float: left; - width: 80%; - padding: 0.25em; - box-sizing: border-box; -} - -div.sphinxsidebar #searchbox input[type="submit"] { - float: left; - width: 20%; - border-left: none; - padding: 0.25em; - box-sizing: border-box; -} - - -img { - border: 0; - max-width: 100%; -} - -/* -- search page ----------------------------------------------------------- */ - -ul.search { - margin: 10px 0 0 20px; - padding: 0; -} - -ul.search li { - padding: 5px 0 5px 20px; - background-image: url(file.png); - background-repeat: no-repeat; - background-position: 0 7px; -} - -ul.search li a { - font-weight: bold; -} - -ul.search li div.context { - color: #888; - margin: 2px 0 0 30px; - text-align: left; -} - -ul.keywordmatches li.goodmatch a { - font-weight: bold; -} - -/* -- index page ------------------------------------------------------------ */ - -table.contentstable { - width: 90%; - margin-left: auto; - margin-right: auto; -} - -table.contentstable p.biglink { - line-height: 150%; -} - -a.biglink { - font-size: 1.3em; -} - -span.linkdescr { - font-style: italic; - padding-top: 5px; - font-size: 90%; -} - -/* -- general index --------------------------------------------------------- */ - -table.indextable { - width: 100%; -} - -table.indextable td { - text-align: left; - vertical-align: top; -} - -table.indextable ul { - margin-top: 0; - margin-bottom: 0; - list-style-type: none; -} - -table.indextable > tbody > tr > td > ul { - padding-left: 0em; -} - -table.indextable tr.pcap { - height: 10px; -} - -table.indextable tr.cap { - margin-top: 10px; - background-color: #f2f2f2; -} - -img.toggler { - margin-right: 3px; - margin-top: 3px; - cursor: pointer; -} - -div.modindex-jumpbox { - border-top: 1px solid #ddd; - 
border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -div.genindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -/* -- domain module index --------------------------------------------------- */ - -table.modindextable td { - padding: 2px; - border-collapse: collapse; -} - -/* -- general body styles --------------------------------------------------- */ - -div.body { - min-width: 450px; - max-width: 800px; -} - -div.body p, div.body dd, div.body li, div.body blockquote { - -moz-hyphens: auto; - -ms-hyphens: auto; - -webkit-hyphens: auto; - hyphens: auto; -} - -a.headerlink { - visibility: hidden; -} - -a.brackets:before, -span.brackets > a:before{ - content: "["; -} - -a.brackets:after, -span.brackets > a:after { - content: "]"; -} - -h1:hover > a.headerlink, -h2:hover > a.headerlink, -h3:hover > a.headerlink, -h4:hover > a.headerlink, -h5:hover > a.headerlink, -h6:hover > a.headerlink, -dt:hover > a.headerlink, -caption:hover > a.headerlink, -p.caption:hover > a.headerlink, -div.code-block-caption:hover > a.headerlink { - visibility: visible; -} - -div.body p.caption { - text-align: inherit; -} - -div.body td { - text-align: left; -} - -.first { - margin-top: 0 !important; -} - -p.rubric { - margin-top: 30px; - font-weight: bold; -} - -img.align-left, .figure.align-left, object.align-left { - clear: left; - float: left; - margin-right: 1em; -} - -img.align-right, .figure.align-right, object.align-right { - clear: right; - float: right; - margin-left: 1em; -} - -img.align-center, .figure.align-center, object.align-center { - display: block; - margin-left: auto; - margin-right: auto; -} - -img.align-default, .figure.align-default { - display: block; - margin-left: auto; - margin-right: auto; -} - -.align-left { - text-align: left; -} - -.align-center { - text-align: center; -} - -.align-default { - text-align: center; -} - -.align-right { - text-align: right; -} - -/* -- sidebars -------------------------------------------------------------- */ - -div.sidebar { - margin: 0 0 0.5em 1em; - border: 1px solid #ddb; - padding: 7px 7px 0 7px; - background-color: #ffe; - width: 40%; - float: right; -} - -p.sidebar-title { - font-weight: bold; -} - -/* -- topics ---------------------------------------------------------------- */ - -div.topic { - border: 1px solid #ccc; - padding: 7px 7px 0 7px; - margin: 10px 0 10px 0; -} - -p.topic-title { - font-size: 1.1em; - font-weight: bold; - margin-top: 10px; -} - -/* -- admonitions ----------------------------------------------------------- */ - -div.admonition { - margin-top: 10px; - margin-bottom: 10px; - padding: 7px; -} - -div.admonition dt { - font-weight: bold; -} - -div.admonition dl { - margin-bottom: 0; -} - -p.admonition-title { - margin: 0px 10px 5px 0px; - font-weight: bold; -} - -div.body p.centered { - text-align: center; - margin-top: 25px; -} - -/* -- tables ---------------------------------------------------------------- */ - -table.docutils { - border: 0; - border-collapse: collapse; -} - -table.align-center { - margin-left: auto; - margin-right: auto; -} - -table.align-default { - margin-left: auto; - margin-right: auto; -} - -table caption span.caption-number { - font-style: italic; -} - -table caption span.caption-text { -} - -table.docutils td, table.docutils th { - padding: 1px 8px 1px 5px; - border-top: 0; - border-left: 0; - border-right: 0; - border-bottom: 1px solid #aaa; -} - -table.footnote td, table.footnote th { - border: 0 
!important; -} - -th { - text-align: left; - padding-right: 5px; -} - -table.citation { - border-left: solid 1px gray; - margin-left: 1px; -} - -table.citation td { - border-bottom: none; -} - -th > p:first-child, -td > p:first-child { - margin-top: 0px; -} - -th > p:last-child, -td > p:last-child { - margin-bottom: 0px; -} - -/* -- figures --------------------------------------------------------------- */ - -div.figure { - margin: 0.5em; - padding: 0.5em; -} - -div.figure p.caption { - padding: 0.3em; -} - -div.figure p.caption span.caption-number { - font-style: italic; -} - -div.figure p.caption span.caption-text { -} - -/* -- field list styles ----------------------------------------------------- */ - -table.field-list td, table.field-list th { - border: 0 !important; -} - -.field-list ul { - margin: 0; - padding-left: 1em; -} - -.field-list p { - margin: 0; -} - -.field-name { - -moz-hyphens: manual; - -ms-hyphens: manual; - -webkit-hyphens: manual; - hyphens: manual; -} - -/* -- hlist styles ---------------------------------------------------------- */ - -table.hlist td { - vertical-align: top; -} - - -/* -- other body styles ----------------------------------------------------- */ - -ol.arabic { - list-style: decimal; -} - -ol.loweralpha { - list-style: lower-alpha; -} - -ol.upperalpha { - list-style: upper-alpha; -} - -ol.lowerroman { - list-style: lower-roman; -} - -ol.upperroman { - list-style: upper-roman; -} - -li > p:first-child { - margin-top: 0px; -} - -li > p:last-child { - margin-bottom: 0px; -} - -dl.footnote > dt, -dl.citation > dt { - float: left; -} - -dl.footnote > dd, -dl.citation > dd { - margin-bottom: 0em; -} - -dl.footnote > dd:after, -dl.citation > dd:after { - content: ""; - clear: both; -} - -dl.field-list { - display: grid; - grid-template-columns: fit-content(30%) auto; -} - -dl.field-list > dt { - font-weight: bold; - word-break: break-word; - padding-left: 0.5em; - padding-right: 5px; -} - -dl.field-list > dt:after { - content: ":"; -} - -dl.field-list > dd { - padding-left: 0.5em; - margin-top: 0em; - margin-left: 0em; - margin-bottom: 0em; -} - -dl { - margin-bottom: 15px; -} - -dd > p:first-child { - margin-top: 0px; -} - -dd ul, dd table { - margin-bottom: 10px; -} - -dd { - margin-top: 3px; - margin-bottom: 10px; - margin-left: 30px; -} - -dt:target, span.highlighted { - background-color: #fbe54e; -} - -rect.highlighted { - fill: #fbe54e; -} - -dl.glossary dt { - font-weight: bold; - font-size: 1.1em; -} - -.optional { - font-size: 1.3em; -} - -.sig-paren { - font-size: larger; -} - -.versionmodified { - font-style: italic; -} - -.system-message { - background-color: #fda; - padding: 5px; - border: 3px solid red; -} - -.footnote:target { - background-color: #ffa; -} - -.line-block { - display: block; - margin-top: 1em; - margin-bottom: 1em; -} - -.line-block .line-block { - margin-top: 0; - margin-bottom: 0; - margin-left: 1.5em; -} - -.guilabel, .menuselection { - font-family: sans-serif; -} - -.accelerator { - text-decoration: underline; -} - -.classifier { - font-style: oblique; -} - -.classifier:before { - font-style: normal; - margin: 0.5em; - content: ":"; -} - -abbr, acronym { - border-bottom: dotted 1px; - cursor: help; -} - -/* -- code displays --------------------------------------------------------- */ - -pre { - overflow: auto; - overflow-y: hidden; /* fixes display issues on Chrome browsers */ -} - -span.pre { - -moz-hyphens: none; - -ms-hyphens: none; - -webkit-hyphens: none; - hyphens: none; -} - -td.linenos pre { - padding: 5px 0px; - 
border: 0; - background-color: transparent; - color: #aaa; -} - -table.highlighttable { - margin-left: 0.5em; -} - -table.highlighttable td { - padding: 0 0.5em 0 0.5em; -} - -div.code-block-caption { - padding: 2px 5px; - font-size: small; -} - -div.code-block-caption code { - background-color: transparent; -} - -div.code-block-caption + div > div.highlight > pre { - margin-top: 0; -} - -div.code-block-caption span.caption-number { - padding: 0.1em 0.3em; - font-style: italic; -} - -div.code-block-caption span.caption-text { -} - -div.literal-block-wrapper { - padding: 1em 1em 0; -} - -div.literal-block-wrapper div.highlight { - margin: 0; -} - -code.descname { - background-color: transparent; - font-weight: bold; - font-size: 1.2em; -} - -code.descclassname { - background-color: transparent; -} - -code.xref, a code { - background-color: transparent; - font-weight: bold; -} - -h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { - background-color: transparent; -} - -.viewcode-link { - float: right; -} - -.viewcode-back { - float: right; - font-family: sans-serif; -} - -div.viewcode-block:target { - margin: -1px -10px; - padding: 0 10px; -} - -/* -- math display ---------------------------------------------------------- */ - -img.math { - vertical-align: middle; -} - -div.body div.math p { - text-align: center; -} - -span.eqno { - float: right; -} - -span.eqno a.headerlink { - position: relative; - left: 0px; - z-index: 1; -} - -div.math:hover a.headerlink { - visibility: visible; -} - -/* -- printout stylesheet --------------------------------------------------- */ - -@media print { - div.document, - div.documentwrapper, - div.bodywrapper { - margin: 0 !important; - width: 100%; - } - - div.sphinxsidebar, - div.related, - div.footer, - #top-link { - display: none; - } -} \ No newline at end of file diff --git a/doc/_build/html/_static/bodybg.png b/doc/_build/html/_static/bodybg.png deleted file mode 100644 index 46892b8..0000000 Binary files a/doc/_build/html/_static/bodybg.png and /dev/null differ diff --git a/doc/_build/html/_static/demo.css b/doc/_build/html/_static/demo.css deleted file mode 100644 index 9344291..0000000 --- a/doc/_build/html/_static/demo.css +++ /dev/null @@ -1,38 +0,0 @@ -#try { - background-color: #f6f6f6; - border-radius: 0; - border: 1px solid #ccc; - margin-top: 15px; - padding: 10px 15px 5px 10px; - position: relative; -} - -#try h2 { - margin-top: 0; -} - -#try textarea { - border: 1px solid #999; - padding: 2px; - width: 100%; - min-height: 150px; -} - -#hlcode pre { - background-color: transparent; - border-radius: 0; -} - -#loading { - position: absolute; - top: 0; - left: 0; - width: 100%; - height: 100%; - margin: auto auto; - background-color: #cccccccc; - display: flex; - flex-direction: column; - justify-content: center; - text-align: center; -} diff --git a/doc/_build/html/_static/demo.js b/doc/_build/html/_static/demo.js deleted file mode 100644 index f538492..0000000 --- a/doc/_build/html/_static/demo.js +++ /dev/null @@ -1,100 +0,0 @@ -languagePluginLoader.then(() => { - // pyodide is now ready to use... 
- pyodide.loadPackage('Pygments').then(() => { - pyodide.runPython('import pygments.lexers, pygments.formatters.html, pygments.styles'); - - var lexerlist = pyodide.runPython('list(pygments.lexers.get_all_lexers())'); - var sel = document.getElementById("lang"); - for (lex of lexerlist) { - var opt = document.createElement("option"); - opt.text = lex[0]; - opt.value = lex[1][0]; - sel.add(opt); - } - - var stylelist = pyodide.runPython('list(pygments.styles.get_all_styles())'); - var sel = document.getElementById("style"); - for (sty of stylelist) { - if (sty != "default") { - var opt = document.createElement("option"); - opt.text = sty; - opt.value = sty; - sel.add(opt); - } - } - - document.getElementById("hlbtn").disabled = false; - document.getElementById("loading").style.display = "none"; - }); -}); - -function new_file() { - pyodide.globals['fname'] = document.getElementById("file").files[0].name; - var alias = pyodide.runPython('pygments.lexers.find_lexer_class_for_filename(fname).aliases[0]'); - var sel = document.getElementById("lang"); - for (var i = 0; i < sel.length; i++) { - if (sel.options[i].value == alias) { - sel.selectedIndex = i; - reset_err_hl(); - break; - } - } -} - -function reset_err_hl() { - document.getElementById("aroundlang").style.backgroundColor = null; -} - -function highlight() { - var select = document.getElementById("lang"); - var alias = select.options.item(select.selectedIndex).value - - if (alias == "") { - document.getElementById("aroundlang").style.backgroundColor = "#ffcccc"; - return; - } - pyodide.globals['alias'] = alias; - - var select = document.getElementById("style"); - pyodide.globals['style'] = select.options.item(select.selectedIndex).value; - - pyodide.runPython('lexer = pygments.lexers.get_lexer_by_name(alias)'); - pyodide.runPython('fmter = pygments.formatters.html.HtmlFormatter(noclasses=True, style=style)'); - - var file = document.getElementById("file").files[0]; - if (file) { - file.arrayBuffer().then(function(buf) { - pyodide.globals['code_mem'] = buf; - pyodide.runPython('code = bytes(code_mem)'); - highlight_now(); - }); - } else { - pyodide.globals['code'] = document.getElementById("code").value; - highlight_now(); - } -} - -function highlight_now() { - var out = document.getElementById("hlcode"); - out.innerHTML = pyodide.runPython('pygments.highlight(code, lexer, fmter)'); - document.location.hash = "#try"; - document.getElementById("hlcodedl").style.display = "block"; -} - -function download_code() { - var filename = "highlighted.html"; - var hlcode = document.getElementById("hlcode").innerHTML; - var blob = new Blob([hlcode], {type: 'text/html'}); - if (window.navigator.msSaveOrOpenBlob) { - window.navigator.msSaveBlob(blob, filename); - } - else{ - var elem = window.document.createElement('a'); - elem.href = window.URL.createObjectURL(blob); - elem.download = filename; - document.body.appendChild(elem); - elem.click(); - document.body.removeChild(elem); - window.URL.revokeObjectURL(elem.href); - } -} diff --git a/doc/_build/html/_static/docbg.png b/doc/_build/html/_static/docbg.png deleted file mode 100644 index 13e61f3..0000000 Binary files a/doc/_build/html/_static/docbg.png and /dev/null differ diff --git a/doc/_build/html/_static/doctools.js b/doc/_build/html/_static/doctools.js deleted file mode 100644 index b33f87f..0000000 --- a/doc/_build/html/_static/doctools.js +++ /dev/null @@ -1,314 +0,0 @@ -/* - * doctools.js - * ~~~~~~~~~~~ - * - * Sphinx JavaScript utilities for all documentation. 
- * - * :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. - * - */ - -/** - * select a different prefix for underscore - */ -$u = _.noConflict(); - -/** - * make the code below compatible with browsers without - * an installed firebug like debugger -if (!window.console || !console.firebug) { - var names = ["log", "debug", "info", "warn", "error", "assert", "dir", - "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace", - "profile", "profileEnd"]; - window.console = {}; - for (var i = 0; i < names.length; ++i) - window.console[names[i]] = function() {}; -} - */ - -/** - * small helper function to urldecode strings - */ -jQuery.urldecode = function(x) { - return decodeURIComponent(x).replace(/\+/g, ' '); -}; - -/** - * small helper function to urlencode strings - */ -jQuery.urlencode = encodeURIComponent; - -/** - * This function returns the parsed url parameters of the - * current request. Multiple values per key are supported, - * it will always return arrays of strings for the value parts. - */ -jQuery.getQueryParameters = function(s) { - if (typeof s === 'undefined') - s = document.location.search; - var parts = s.substr(s.indexOf('?') + 1).split('&'); - var result = {}; - for (var i = 0; i < parts.length; i++) { - var tmp = parts[i].split('=', 2); - var key = jQuery.urldecode(tmp[0]); - var value = jQuery.urldecode(tmp[1]); - if (key in result) - result[key].push(value); - else - result[key] = [value]; - } - return result; -}; - -/** - * highlight a given string on a jquery object by wrapping it in - * span elements with the given class name. - */ -jQuery.fn.highlightText = function(text, className) { - function highlight(node, addItems) { - if (node.nodeType === 3) { - var val = node.nodeValue; - var pos = val.toLowerCase().indexOf(text); - if (pos >= 0 && - !jQuery(node.parentNode).hasClass(className) && - !jQuery(node.parentNode).hasClass("nohighlight")) { - var span; - var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); - if (isInSVG) { - span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); - } else { - span = document.createElement("span"); - span.className = className; - } - span.appendChild(document.createTextNode(val.substr(pos, text.length))); - node.parentNode.insertBefore(span, node.parentNode.insertBefore( - document.createTextNode(val.substr(pos + text.length)), - node.nextSibling)); - node.nodeValue = val.substr(0, pos); - if (isInSVG) { - var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); - var bbox = node.parentElement.getBBox(); - rect.x.baseVal.value = bbox.x; - rect.y.baseVal.value = bbox.y; - rect.width.baseVal.value = bbox.width; - rect.height.baseVal.value = bbox.height; - rect.setAttribute('class', className); - addItems.push({ - "parent": node.parentNode, - "target": rect}); - } - } - } - else if (!jQuery(node).is("button, select, textarea")) { - jQuery.each(node.childNodes, function() { - highlight(this, addItems); - }); - } - } - var addItems = []; - var result = this.each(function() { - highlight(this, addItems); - }); - for (var i = 0; i < addItems.length; ++i) { - jQuery(addItems[i].parent).before(addItems[i].target); - } - return result; -}; - -/* - * backward compatibility for jQuery.browser - * This will be supported until firefox bug is fixed. 
- */ -if (!jQuery.browser) { - jQuery.uaMatch = function(ua) { - ua = ua.toLowerCase(); - - var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || - /(webkit)[ \/]([\w.]+)/.exec(ua) || - /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || - /(msie) ([\w.]+)/.exec(ua) || - ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || - []; - - return { - browser: match[ 1 ] || "", - version: match[ 2 ] || "0" - }; - }; - jQuery.browser = {}; - jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; -} - -/** - * Small JavaScript module for the documentation. - */ -var Documentation = { - - init : function() { - this.fixFirefoxAnchorBug(); - this.highlightSearchWords(); - this.initIndexTable(); - if (DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) { - this.initOnKeyListeners(); - } - }, - - /** - * i18n support - */ - TRANSLATIONS : {}, - PLURAL_EXPR : function(n) { return n === 1 ? 0 : 1; }, - LOCALE : 'unknown', - - // gettext and ngettext don't access this so that the functions - // can safely bound to a different name (_ = Documentation.gettext) - gettext : function(string) { - var translated = Documentation.TRANSLATIONS[string]; - if (typeof translated === 'undefined') - return string; - return (typeof translated === 'string') ? translated : translated[0]; - }, - - ngettext : function(singular, plural, n) { - var translated = Documentation.TRANSLATIONS[singular]; - if (typeof translated === 'undefined') - return (n == 1) ? singular : plural; - return translated[Documentation.PLURALEXPR(n)]; - }, - - addTranslations : function(catalog) { - for (var key in catalog.messages) - this.TRANSLATIONS[key] = catalog.messages[key]; - this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')'); - this.LOCALE = catalog.locale; - }, - - /** - * add context elements like header anchor links - */ - addContextElements : function() { - $('div[id] > :header:first').each(function() { - $('\u00B6'). - attr('href', '#' + this.id). - attr('title', _('Permalink to this headline')). - appendTo(this); - }); - $('dt[id]').each(function() { - $('\u00B6'). - attr('href', '#' + this.id). - attr('title', _('Permalink to this definition')). - appendTo(this); - }); - }, - - /** - * workaround a firefox stupidity - * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075 - */ - fixFirefoxAnchorBug : function() { - if (document.location.hash && $.browser.mozilla) - window.setTimeout(function() { - document.location.href += ''; - }, 10); - }, - - /** - * highlight the search words provided in the url in the text - */ - highlightSearchWords : function() { - var params = $.getQueryParameters(); - var terms = (params.highlight) ? 
params.highlight[0].split(/\s+/) : []; - if (terms.length) { - var body = $('div.body'); - if (!body.length) { - body = $('body'); - } - window.setTimeout(function() { - $.each(terms, function() { - body.highlightText(this.toLowerCase(), 'highlighted'); - }); - }, 10); - $('') - .appendTo($('#searchbox')); - } - }, - - /** - * init the domain index toggle buttons - */ - initIndexTable : function() { - var togglers = $('img.toggler').click(function() { - var src = $(this).attr('src'); - var idnum = $(this).attr('id').substr(7); - $('tr.cg-' + idnum).toggle(); - if (src.substr(-9) === 'minus.png') - $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); - else - $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); - }).css('display', ''); - if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) { - togglers.click(); - } - }, - - /** - * helper function to hide the search marks again - */ - hideSearchWords : function() { - $('#searchbox .highlight-link').fadeOut(300); - $('span.highlighted').removeClass('highlighted'); - }, - - /** - * make the url absolute - */ - makeURL : function(relativeURL) { - return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; - }, - - /** - * get the current relative url - */ - getCurrentURL : function() { - var path = document.location.pathname; - var parts = path.split(/\//); - $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { - if (this === '..') - parts.pop(); - }); - var url = parts.join('/'); - return path.substring(url.lastIndexOf('/') + 1, path.length - 1); - }, - - initOnKeyListeners: function() { - $(document).keyup(function(event) { - var activeElementType = document.activeElement.tagName; - // don't navigate when in search box or textarea - if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT') { - switch (event.keyCode) { - case 37: // left - var prevHref = $('link[rel="prev"]').prop('href'); - if (prevHref) { - window.location.href = prevHref; - return false; - } - case 39: // right - var nextHref = $('link[rel="next"]').prop('href'); - if (nextHref) { - window.location.href = nextHref; - return false; - } - } - } - }); - } -}; - -// quick alias for translations -_ = Documentation.gettext; - -$(document).ready(function() { - Documentation.init(); -}); diff --git a/doc/_build/html/_static/documentation_options.js b/doc/_build/html/_static/documentation_options.js deleted file mode 100644 index 6ab1660..0000000 --- a/doc/_build/html/_static/documentation_options.js +++ /dev/null @@ -1,10 +0,0 @@ -var DOCUMENTATION_OPTIONS = { - URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), - VERSION: '2.4.2', - LANGUAGE: 'None', - COLLAPSE_INDEX: false, - FILE_SUFFIX: '.html', - HAS_SOURCE: true, - SOURCELINK_SUFFIX: '.txt', - NAVIGATION_WITH_KEYS: false -}; \ No newline at end of file diff --git a/doc/_build/html/_static/favicon.ico b/doc/_build/html/_static/favicon.ico deleted file mode 100644 index 777f617..0000000 Binary files a/doc/_build/html/_static/favicon.ico and /dev/null differ diff --git a/doc/_build/html/_static/file.png b/doc/_build/html/_static/file.png deleted file mode 100644 index a858a41..0000000 Binary files a/doc/_build/html/_static/file.png and /dev/null differ diff --git a/doc/_build/html/_static/github.png b/doc/_build/html/_static/github.png deleted file mode 100644 index 5d146ad..0000000 Binary files a/doc/_build/html/_static/github.png and /dev/null differ diff --git a/doc/_build/html/_static/jquery-3.4.1.js 
b/doc/_build/html/_static/jquery-3.4.1.js deleted file mode 100644 index 773ad95..0000000 --- a/doc/_build/html/_static/jquery-3.4.1.js +++ /dev/null @@ -1,10598 +0,0 @@ -/*! - * jQuery JavaScript Library v3.4.1 - * https://jquery.com/ - * - * Includes Sizzle.js - * https://sizzlejs.com/ - * - * Copyright JS Foundation and other contributors - * Released under the MIT license - * https://jquery.org/license - * - * Date: 2019-05-01T21:04Z - */ -( function( global, factory ) { - - "use strict"; - - if ( typeof module === "object" && typeof module.exports === "object" ) { - - // For CommonJS and CommonJS-like environments where a proper `window` - // is present, execute the factory and get jQuery. - // For environments that do not have a `window` with a `document` - // (such as Node.js), expose a factory as module.exports. - // This accentuates the need for the creation of a real `window`. - // e.g. var jQuery = require("jquery")(window); - // See ticket #14549 for more info. - module.exports = global.document ? - factory( global, true ) : - function( w ) { - if ( !w.document ) { - throw new Error( "jQuery requires a window with a document" ); - } - return factory( w ); - }; - } else { - factory( global ); - } - -// Pass this if window is not defined yet -} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { - -// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 -// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode -// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common -// enough that all such attempts are guarded in a try block. -"use strict"; - -var arr = []; - -var document = window.document; - -var getProto = Object.getPrototypeOf; - -var slice = arr.slice; - -var concat = arr.concat; - -var push = arr.push; - -var indexOf = arr.indexOf; - -var class2type = {}; - -var toString = class2type.toString; - -var hasOwn = class2type.hasOwnProperty; - -var fnToString = hasOwn.toString; - -var ObjectFunctionString = fnToString.call( Object ); - -var support = {}; - -var isFunction = function isFunction( obj ) { - - // Support: Chrome <=57, Firefox <=52 - // In some browsers, typeof returns "function" for HTML elements - // (i.e., `typeof document.createElement( "object" ) === "function"`). - // We don't want to classify *any* DOM node as a function. - return typeof obj === "function" && typeof obj.nodeType !== "number"; - }; - - -var isWindow = function isWindow( obj ) { - return obj != null && obj === obj.window; - }; - - - - - var preservedScriptAttributes = { - type: true, - src: true, - nonce: true, - noModule: true - }; - - function DOMEval( code, node, doc ) { - doc = doc || document; - - var i, val, - script = doc.createElement( "script" ); - - script.text = code; - if ( node ) { - for ( i in preservedScriptAttributes ) { - - // Support: Firefox 64+, Edge 18+ - // Some browsers don't support the "nonce" property on scripts. - // On the other hand, just using `getAttribute` is not enough as - // the `nonce` attribute is reset to an empty string whenever it - // becomes browsing-context connected. - // See https://github.com/whatwg/html/issues/2369 - // See https://html.spec.whatwg.org/#nonce-attributes - // The `node.getAttribute` check was added for the sake of - // `jQuery.globalEval` so that it can fake a nonce-containing node - // via an object. 
- val = node[ i ] || node.getAttribute && node.getAttribute( i ); - if ( val ) { - script.setAttribute( i, val ); - } - } - } - doc.head.appendChild( script ).parentNode.removeChild( script ); - } - - -function toType( obj ) { - if ( obj == null ) { - return obj + ""; - } - - // Support: Android <=2.3 only (functionish RegExp) - return typeof obj === "object" || typeof obj === "function" ? - class2type[ toString.call( obj ) ] || "object" : - typeof obj; -} -/* global Symbol */ -// Defining this global in .eslintrc.json would create a danger of using the global -// unguarded in another place, it seems safer to define global only for this module - - - -var - version = "3.4.1", - - // Define a local copy of jQuery - jQuery = function( selector, context ) { - - // The jQuery object is actually just the init constructor 'enhanced' - // Need init if jQuery is called (just allow error to be thrown if not included) - return new jQuery.fn.init( selector, context ); - }, - - // Support: Android <=4.0 only - // Make sure we trim BOM and NBSP - rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g; - -jQuery.fn = jQuery.prototype = { - - // The current version of jQuery being used - jquery: version, - - constructor: jQuery, - - // The default length of a jQuery object is 0 - length: 0, - - toArray: function() { - return slice.call( this ); - }, - - // Get the Nth element in the matched element set OR - // Get the whole matched element set as a clean array - get: function( num ) { - - // Return all the elements in a clean array - if ( num == null ) { - return slice.call( this ); - } - - // Return just the one element from the set - return num < 0 ? this[ num + this.length ] : this[ num ]; - }, - - // Take an array of elements and push it onto the stack - // (returning the new matched element set) - pushStack: function( elems ) { - - // Build a new jQuery matched element set - var ret = jQuery.merge( this.constructor(), elems ); - - // Add the old object onto the stack (as a reference) - ret.prevObject = this; - - // Return the newly-formed element set - return ret; - }, - - // Execute a callback for every element in the matched set. - each: function( callback ) { - return jQuery.each( this, callback ); - }, - - map: function( callback ) { - return this.pushStack( jQuery.map( this, function( elem, i ) { - return callback.call( elem, i, elem ); - } ) ); - }, - - slice: function() { - return this.pushStack( slice.apply( this, arguments ) ); - }, - - first: function() { - return this.eq( 0 ); - }, - - last: function() { - return this.eq( -1 ); - }, - - eq: function( i ) { - var len = this.length, - j = +i + ( i < 0 ? len : 0 ); - return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); - }, - - end: function() { - return this.prevObject || this.constructor(); - }, - - // For internal use only. - // Behaves like an Array's method, not like a jQuery method. 
- push: push, - sort: arr.sort, - splice: arr.splice -}; - -jQuery.extend = jQuery.fn.extend = function() { - var options, name, src, copy, copyIsArray, clone, - target = arguments[ 0 ] || {}, - i = 1, - length = arguments.length, - deep = false; - - // Handle a deep copy situation - if ( typeof target === "boolean" ) { - deep = target; - - // Skip the boolean and the target - target = arguments[ i ] || {}; - i++; - } - - // Handle case when target is a string or something (possible in deep copy) - if ( typeof target !== "object" && !isFunction( target ) ) { - target = {}; - } - - // Extend jQuery itself if only one argument is passed - if ( i === length ) { - target = this; - i--; - } - - for ( ; i < length; i++ ) { - - // Only deal with non-null/undefined values - if ( ( options = arguments[ i ] ) != null ) { - - // Extend the base object - for ( name in options ) { - copy = options[ name ]; - - // Prevent Object.prototype pollution - // Prevent never-ending loop - if ( name === "__proto__" || target === copy ) { - continue; - } - - // Recurse if we're merging plain objects or arrays - if ( deep && copy && ( jQuery.isPlainObject( copy ) || - ( copyIsArray = Array.isArray( copy ) ) ) ) { - src = target[ name ]; - - // Ensure proper type for the source value - if ( copyIsArray && !Array.isArray( src ) ) { - clone = []; - } else if ( !copyIsArray && !jQuery.isPlainObject( src ) ) { - clone = {}; - } else { - clone = src; - } - copyIsArray = false; - - // Never move original objects, clone them - target[ name ] = jQuery.extend( deep, clone, copy ); - - // Don't bring in undefined values - } else if ( copy !== undefined ) { - target[ name ] = copy; - } - } - } - } - - // Return the modified object - return target; -}; - -jQuery.extend( { - - // Unique for each copy of jQuery on the page - expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), - - // Assume jQuery is ready without the ready module - isReady: true, - - error: function( msg ) { - throw new Error( msg ); - }, - - noop: function() {}, - - isPlainObject: function( obj ) { - var proto, Ctor; - - // Detect obvious negatives - // Use toString instead of jQuery.type to catch host objects - if ( !obj || toString.call( obj ) !== "[object Object]" ) { - return false; - } - - proto = getProto( obj ); - - // Objects with no prototype (e.g., `Object.create( null )`) are plain - if ( !proto ) { - return true; - } - - // Objects with prototype are plain iff they were constructed by a global Object function - Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; - return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; - }, - - isEmptyObject: function( obj ) { - var name; - - for ( name in obj ) { - return false; - } - return true; - }, - - // Evaluates a script in a global context - globalEval: function( code, options ) { - DOMEval( code, { nonce: options && options.nonce } ); - }, - - each: function( obj, callback ) { - var length, i = 0; - - if ( isArrayLike( obj ) ) { - length = obj.length; - for ( ; i < length; i++ ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } else { - for ( i in obj ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } - - return obj; - }, - - // Support: Android <=4.0 only - trim: function( text ) { - return text == null ? 
- "" : - ( text + "" ).replace( rtrim, "" ); - }, - - // results is for internal usage only - makeArray: function( arr, results ) { - var ret = results || []; - - if ( arr != null ) { - if ( isArrayLike( Object( arr ) ) ) { - jQuery.merge( ret, - typeof arr === "string" ? - [ arr ] : arr - ); - } else { - push.call( ret, arr ); - } - } - - return ret; - }, - - inArray: function( elem, arr, i ) { - return arr == null ? -1 : indexOf.call( arr, elem, i ); - }, - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - merge: function( first, second ) { - var len = +second.length, - j = 0, - i = first.length; - - for ( ; j < len; j++ ) { - first[ i++ ] = second[ j ]; - } - - first.length = i; - - return first; - }, - - grep: function( elems, callback, invert ) { - var callbackInverse, - matches = [], - i = 0, - length = elems.length, - callbackExpect = !invert; - - // Go through the array, only saving the items - // that pass the validator function - for ( ; i < length; i++ ) { - callbackInverse = !callback( elems[ i ], i ); - if ( callbackInverse !== callbackExpect ) { - matches.push( elems[ i ] ); - } - } - - return matches; - }, - - // arg is for internal usage only - map: function( elems, callback, arg ) { - var length, value, - i = 0, - ret = []; - - // Go through the array, translating each of the items to their new values - if ( isArrayLike( elems ) ) { - length = elems.length; - for ( ; i < length; i++ ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - - // Go through every key on the object, - } else { - for ( i in elems ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - } - - // Flatten any nested arrays - return concat.apply( [], ret ); - }, - - // A global GUID counter for objects - guid: 1, - - // jQuery.support is not used in Core but other projects attach their - // properties to it so it needs to exist. - support: support -} ); - -if ( typeof Symbol === "function" ) { - jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; -} - -// Populate the class2type map -jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), -function( i, name ) { - class2type[ "[object " + name + "]" ] = name.toLowerCase(); -} ); - -function isArrayLike( obj ) { - - // Support: real iOS 8.2 only (not reproducible in simulator) - // `in` check used to prevent JIT error (gh-2145) - // hasOwn isn't used here due to false negatives - // regarding Nodelist length in IE - var length = !!obj && "length" in obj && obj.length, - type = toType( obj ); - - if ( isFunction( obj ) || isWindow( obj ) ) { - return false; - } - - return type === "array" || length === 0 || - typeof length === "number" && length > 0 && ( length - 1 ) in obj; -} -var Sizzle = -/*! 
- * Sizzle CSS Selector Engine v2.3.4 - * https://sizzlejs.com/ - * - * Copyright JS Foundation and other contributors - * Released under the MIT license - * https://js.foundation/ - * - * Date: 2019-04-08 - */ -(function( window ) { - -var i, - support, - Expr, - getText, - isXML, - tokenize, - compile, - select, - outermostContext, - sortInput, - hasDuplicate, - - // Local document vars - setDocument, - document, - docElem, - documentIsHTML, - rbuggyQSA, - rbuggyMatches, - matches, - contains, - - // Instance-specific data - expando = "sizzle" + 1 * new Date(), - preferredDoc = window.document, - dirruns = 0, - done = 0, - classCache = createCache(), - tokenCache = createCache(), - compilerCache = createCache(), - nonnativeSelectorCache = createCache(), - sortOrder = function( a, b ) { - if ( a === b ) { - hasDuplicate = true; - } - return 0; - }, - - // Instance methods - hasOwn = ({}).hasOwnProperty, - arr = [], - pop = arr.pop, - push_native = arr.push, - push = arr.push, - slice = arr.slice, - // Use a stripped-down indexOf as it's faster than native - // https://jsperf.com/thor-indexof-vs-for/5 - indexOf = function( list, elem ) { - var i = 0, - len = list.length; - for ( ; i < len; i++ ) { - if ( list[i] === elem ) { - return i; - } - } - return -1; - }, - - booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped", - - // Regular expressions - - // http://www.w3.org/TR/css3-selectors/#whitespace - whitespace = "[\\x20\\t\\r\\n\\f]", - - // http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier - identifier = "(?:\\\\.|[\\w-]|[^\0-\\xa0])+", - - // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors - attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + - // Operator (capture 2) - "*([*^$|!~]?=)" + whitespace + - // "Attribute values must be CSS identifiers [capture 5] or strings [capture 3 or capture 4]" - "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + whitespace + - "*\\]", - - pseudos = ":(" + identifier + ")(?:\\((" + - // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: - // 1. quoted (capture 3; capture 4 or capture 5) - "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + - // 2. simple (capture 6) - "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + - // 3. 
anything else (capture 2) - ".*" + - ")\\)|)", - - // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter - rwhitespace = new RegExp( whitespace + "+", "g" ), - rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ), - - rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), - rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + "*" ), - rdescend = new RegExp( whitespace + "|>" ), - - rpseudo = new RegExp( pseudos ), - ridentifier = new RegExp( "^" + identifier + "$" ), - - matchExpr = { - "ID": new RegExp( "^#(" + identifier + ")" ), - "CLASS": new RegExp( "^\\.(" + identifier + ")" ), - "TAG": new RegExp( "^(" + identifier + "|[*])" ), - "ATTR": new RegExp( "^" + attributes ), - "PSEUDO": new RegExp( "^" + pseudos ), - "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + whitespace + - "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace + - "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), - "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), - // For use in libraries implementing .is() - // We use this for POS matching in `select` - "needsContext": new RegExp( "^" + whitespace + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + - whitespace + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) - }, - - rhtml = /HTML$/i, - rinputs = /^(?:input|select|textarea|button)$/i, - rheader = /^h\d$/i, - - rnative = /^[^{]+\{\s*\[native \w/, - - // Easily-parseable/retrievable ID or TAG or CLASS selectors - rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, - - rsibling = /[+~]/, - - // CSS escapes - // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters - runescape = new RegExp( "\\\\([\\da-f]{1,6}" + whitespace + "?|(" + whitespace + ")|.)", "ig" ), - funescape = function( _, escaped, escapedWhitespace ) { - var high = "0x" + escaped - 0x10000; - // NaN means non-codepoint - // Support: Firefox<24 - // Workaround erroneous numeric interpretation of +"0x" - return high !== high || escapedWhitespace ? - escaped : - high < 0 ? 
- // BMP codepoint - String.fromCharCode( high + 0x10000 ) : - // Supplemental Plane codepoint (surrogate pair) - String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); - }, - - // CSS string/identifier serialization - // https://drafts.csswg.org/cssom/#common-serializing-idioms - rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, - fcssescape = function( ch, asCodePoint ) { - if ( asCodePoint ) { - - // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER - if ( ch === "\0" ) { - return "\uFFFD"; - } - - // Control characters and (dependent upon position) numbers get escaped as code points - return ch.slice( 0, -1 ) + "\\" + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; - } - - // Other potentially-special ASCII characters get backslash-escaped - return "\\" + ch; - }, - - // Used for iframes - // See setDocument() - // Removing the function wrapper causes a "Permission Denied" - // error in IE - unloadHandler = function() { - setDocument(); - }, - - inDisabledFieldset = addCombinator( - function( elem ) { - return elem.disabled === true && elem.nodeName.toLowerCase() === "fieldset"; - }, - { dir: "parentNode", next: "legend" } - ); - -// Optimize for push.apply( _, NodeList ) -try { - push.apply( - (arr = slice.call( preferredDoc.childNodes )), - preferredDoc.childNodes - ); - // Support: Android<4.0 - // Detect silently failing push.apply - arr[ preferredDoc.childNodes.length ].nodeType; -} catch ( e ) { - push = { apply: arr.length ? - - // Leverage slice if possible - function( target, els ) { - push_native.apply( target, slice.call(els) ); - } : - - // Support: IE<9 - // Otherwise append directly - function( target, els ) { - var j = target.length, - i = 0; - // Can't trust NodeList.length - while ( (target[j++] = els[i++]) ) {} - target.length = j - 1; - } - }; -} - -function Sizzle( selector, context, results, seed ) { - var m, i, elem, nid, match, groups, newSelector, - newContext = context && context.ownerDocument, - - // nodeType defaults to 9, since context defaults to document - nodeType = context ? context.nodeType : 9; - - results = results || []; - - // Return early from calls with invalid selector or context - if ( typeof selector !== "string" || !selector || - nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { - - return results; - } - - // Try to shortcut find operations (as opposed to filters) in HTML documents - if ( !seed ) { - - if ( ( context ? 
context.ownerDocument || context : preferredDoc ) !== document ) { - setDocument( context ); - } - context = context || document; - - if ( documentIsHTML ) { - - // If the selector is sufficiently simple, try using a "get*By*" DOM method - // (excepting DocumentFragment context, where the methods don't exist) - if ( nodeType !== 11 && (match = rquickExpr.exec( selector )) ) { - - // ID selector - if ( (m = match[1]) ) { - - // Document context - if ( nodeType === 9 ) { - if ( (elem = context.getElementById( m )) ) { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( elem.id === m ) { - results.push( elem ); - return results; - } - } else { - return results; - } - - // Element context - } else { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( newContext && (elem = newContext.getElementById( m )) && - contains( context, elem ) && - elem.id === m ) { - - results.push( elem ); - return results; - } - } - - // Type selector - } else if ( match[2] ) { - push.apply( results, context.getElementsByTagName( selector ) ); - return results; - - // Class selector - } else if ( (m = match[3]) && support.getElementsByClassName && - context.getElementsByClassName ) { - - push.apply( results, context.getElementsByClassName( m ) ); - return results; - } - } - - // Take advantage of querySelectorAll - if ( support.qsa && - !nonnativeSelectorCache[ selector + " " ] && - (!rbuggyQSA || !rbuggyQSA.test( selector )) && - - // Support: IE 8 only - // Exclude object elements - (nodeType !== 1 || context.nodeName.toLowerCase() !== "object") ) { - - newSelector = selector; - newContext = context; - - // qSA considers elements outside a scoping root when evaluating child or - // descendant combinators, which is not what we want. - // In such cases, we work around the behavior by prefixing every selector in the - // list with an ID selector referencing the scope context. - // Thanks to Andrew Dupont for this technique. 
- if ( nodeType === 1 && rdescend.test( selector ) ) { - - // Capture the context ID, setting it first if necessary - if ( (nid = context.getAttribute( "id" )) ) { - nid = nid.replace( rcssescape, fcssescape ); - } else { - context.setAttribute( "id", (nid = expando) ); - } - - // Prefix every selector in the list - groups = tokenize( selector ); - i = groups.length; - while ( i-- ) { - groups[i] = "#" + nid + " " + toSelector( groups[i] ); - } - newSelector = groups.join( "," ); - - // Expand context for sibling selectors - newContext = rsibling.test( selector ) && testContext( context.parentNode ) || - context; - } - - try { - push.apply( results, - newContext.querySelectorAll( newSelector ) - ); - return results; - } catch ( qsaError ) { - nonnativeSelectorCache( selector, true ); - } finally { - if ( nid === expando ) { - context.removeAttribute( "id" ); - } - } - } - } - } - - // All others - return select( selector.replace( rtrim, "$1" ), context, results, seed ); -} - -/** - * Create key-value caches of limited size - * @returns {function(string, object)} Returns the Object data after storing it on itself with - * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) - * deleting the oldest entry - */ -function createCache() { - var keys = []; - - function cache( key, value ) { - // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) - if ( keys.push( key + " " ) > Expr.cacheLength ) { - // Only keep the most recent entries - delete cache[ keys.shift() ]; - } - return (cache[ key + " " ] = value); - } - return cache; -} - -/** - * Mark a function for special use by Sizzle - * @param {Function} fn The function to mark - */ -function markFunction( fn ) { - fn[ expando ] = true; - return fn; -} - -/** - * Support testing using an element - * @param {Function} fn Passed the created element and returns a boolean result - */ -function assert( fn ) { - var el = document.createElement("fieldset"); - - try { - return !!fn( el ); - } catch (e) { - return false; - } finally { - // Remove from its parent by default - if ( el.parentNode ) { - el.parentNode.removeChild( el ); - } - // release memory in IE - el = null; - } -} - -/** - * Adds the same handler for all of the specified attrs - * @param {String} attrs Pipe-separated list of attributes - * @param {Function} handler The method that will be applied - */ -function addHandle( attrs, handler ) { - var arr = attrs.split("|"), - i = arr.length; - - while ( i-- ) { - Expr.attrHandle[ arr[i] ] = handler; - } -} - -/** - * Checks document order of two siblings - * @param {Element} a - * @param {Element} b - * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b - */ -function siblingCheck( a, b ) { - var cur = b && a, - diff = cur && a.nodeType === 1 && b.nodeType === 1 && - a.sourceIndex - b.sourceIndex; - - // Use IE sourceIndex if available on both nodes - if ( diff ) { - return diff; - } - - // Check if b follows a - if ( cur ) { - while ( (cur = cur.nextSibling) ) { - if ( cur === b ) { - return -1; - } - } - } - - return a ? 
1 : -1; -} - -/** - * Returns a function to use in pseudos for input types - * @param {String} type - */ -function createInputPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for buttons - * @param {String} type - */ -function createButtonPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return (name === "input" || name === "button") && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for :enabled/:disabled - * @param {Boolean} disabled true for :disabled; false for :enabled - */ -function createDisabledPseudo( disabled ) { - - // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable - return function( elem ) { - - // Only certain elements can match :enabled or :disabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled - if ( "form" in elem ) { - - // Check for inherited disabledness on relevant non-disabled elements: - // * listed form-associated elements in a disabled fieldset - // https://html.spec.whatwg.org/multipage/forms.html#category-listed - // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled - // * option elements in a disabled optgroup - // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled - // All such elements have a "form" property. - if ( elem.parentNode && elem.disabled === false ) { - - // Option elements defer to a parent optgroup if present - if ( "label" in elem ) { - if ( "label" in elem.parentNode ) { - return elem.parentNode.disabled === disabled; - } else { - return elem.disabled === disabled; - } - } - - // Support: IE 6 - 11 - // Use the isDisabled shortcut property to check for disabled fieldset ancestors - return elem.isDisabled === disabled || - - // Where there is no isDisabled, check manually - /* jshint -W018 */ - elem.isDisabled !== !disabled && - inDisabledFieldset( elem ) === disabled; - } - - return elem.disabled === disabled; - - // Try to winnow out elements that can't be disabled before trusting the disabled property. - // Some victims get caught in our net (label, legend, menu, track), but it shouldn't - // even exist on them, let alone have a boolean value. 
- } else if ( "label" in elem ) { - return elem.disabled === disabled; - } - - // Remaining elements are neither :enabled nor :disabled - return false; - }; -} - -/** - * Returns a function to use in pseudos for positionals - * @param {Function} fn - */ -function createPositionalPseudo( fn ) { - return markFunction(function( argument ) { - argument = +argument; - return markFunction(function( seed, matches ) { - var j, - matchIndexes = fn( [], seed.length, argument ), - i = matchIndexes.length; - - // Match elements found at the specified indexes - while ( i-- ) { - if ( seed[ (j = matchIndexes[i]) ] ) { - seed[j] = !(matches[j] = seed[j]); - } - } - }); - }); -} - -/** - * Checks a node for validity as a Sizzle context - * @param {Element|Object=} context - * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value - */ -function testContext( context ) { - return context && typeof context.getElementsByTagName !== "undefined" && context; -} - -// Expose support vars for convenience -support = Sizzle.support = {}; - -/** - * Detects XML nodes - * @param {Element|Object} elem An element or a document - * @returns {Boolean} True iff elem is a non-HTML XML node - */ -isXML = Sizzle.isXML = function( elem ) { - var namespace = elem.namespaceURI, - docElem = (elem.ownerDocument || elem).documentElement; - - // Support: IE <=8 - // Assume HTML when documentElement doesn't yet exist, such as inside loading iframes - // https://bugs.jquery.com/ticket/4833 - return !rhtml.test( namespace || docElem && docElem.nodeName || "HTML" ); -}; - -/** - * Sets document-related variables once based on the current document - * @param {Element|Object} [doc] An element or document object to use to set the document - * @returns {Object} Returns the current document - */ -setDocument = Sizzle.setDocument = function( node ) { - var hasCompare, subWindow, - doc = node ? 
node.ownerDocument || node : preferredDoc; - - // Return early if doc is invalid or already selected - if ( doc === document || doc.nodeType !== 9 || !doc.documentElement ) { - return document; - } - - // Update global variables - document = doc; - docElem = document.documentElement; - documentIsHTML = !isXML( document ); - - // Support: IE 9-11, Edge - // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) - if ( preferredDoc !== document && - (subWindow = document.defaultView) && subWindow.top !== subWindow ) { - - // Support: IE 11, Edge - if ( subWindow.addEventListener ) { - subWindow.addEventListener( "unload", unloadHandler, false ); - - // Support: IE 9 - 10 only - } else if ( subWindow.attachEvent ) { - subWindow.attachEvent( "onunload", unloadHandler ); - } - } - - /* Attributes - ---------------------------------------------------------------------- */ - - // Support: IE<8 - // Verify that getAttribute really returns attributes and not properties - // (excepting IE8 booleans) - support.attributes = assert(function( el ) { - el.className = "i"; - return !el.getAttribute("className"); - }); - - /* getElement(s)By* - ---------------------------------------------------------------------- */ - - // Check if getElementsByTagName("*") returns only elements - support.getElementsByTagName = assert(function( el ) { - el.appendChild( document.createComment("") ); - return !el.getElementsByTagName("*").length; - }); - - // Support: IE<9 - support.getElementsByClassName = rnative.test( document.getElementsByClassName ); - - // Support: IE<10 - // Check if getElementById returns elements by name - // The broken getElementById methods don't pick up programmatically-set names, - // so use a roundabout getElementsByName test - support.getById = assert(function( el ) { - docElem.appendChild( el ).id = expando; - return !document.getElementsByName || !document.getElementsByName( expando ).length; - }); - - // ID filter and find - if ( support.getById ) { - Expr.filter["ID"] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - return elem.getAttribute("id") === attrId; - }; - }; - Expr.find["ID"] = function( id, context ) { - if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { - var elem = context.getElementById( id ); - return elem ? [ elem ] : []; - } - }; - } else { - Expr.filter["ID"] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - var node = typeof elem.getAttributeNode !== "undefined" && - elem.getAttributeNode("id"); - return node && node.value === attrId; - }; - }; - - // Support: IE 6 - 7 only - // getElementById is not reliable as a find shortcut - Expr.find["ID"] = function( id, context ) { - if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { - var node, i, elems, - elem = context.getElementById( id ); - - if ( elem ) { - - // Verify the id attribute - node = elem.getAttributeNode("id"); - if ( node && node.value === id ) { - return [ elem ]; - } - - // Fall back on getElementsByName - elems = context.getElementsByName( id ); - i = 0; - while ( (elem = elems[i++]) ) { - node = elem.getAttributeNode("id"); - if ( node && node.value === id ) { - return [ elem ]; - } - } - } - - return []; - } - }; - } - - // Tag - Expr.find["TAG"] = support.getElementsByTagName ? 
- function( tag, context ) {
- if ( typeof context.getElementsByTagName !== "undefined" ) {
- return context.getElementsByTagName( tag );
-
- // DocumentFragment nodes don't have gEBTN
- } else if ( support.qsa ) {
- return context.querySelectorAll( tag );
- }
- } :
-
- function( tag, context ) {
- var elem,
- tmp = [],
- i = 0,
- // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too
- results = context.getElementsByTagName( tag );
-
- // Filter out possible comments
- if ( tag === "*" ) {
- while ( (elem = results[i++]) ) {
- if ( elem.nodeType === 1 ) {
- tmp.push( elem );
- }
- }
-
- return tmp;
- }
- return results;
- };
-
- // Class
- Expr.find["CLASS"] = support.getElementsByClassName && function( className, context ) {
- if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) {
- return context.getElementsByClassName( className );
- }
- };
-
- /* QSA/matchesSelector
- ---------------------------------------------------------------------- */
-
- // QSA and matchesSelector support
-
- // matchesSelector(:active) reports false when true (IE9/Opera 11.5)
- rbuggyMatches = [];
-
- // qSa(:focus) reports false when true (Chrome 21)
- // We allow this because of a bug in IE8/9 that throws an error
- // whenever `document.activeElement` is accessed on an iframe
- // So, we allow :focus to pass through QSA all the time to avoid the IE error
- // See https://bugs.jquery.com/ticket/13378
- rbuggyQSA = [];
-
- if ( (support.qsa = rnative.test( document.querySelectorAll )) ) {
- // Build QSA regex
- // Regex strategy adopted from Diego Perini
- assert(function( el ) {
- // Select is set to empty string on purpose
- // This is to test IE's treatment of not explicitly
- // setting a boolean content attribute,
- // since its presence should be enough
- // https://bugs.jquery.com/ticket/12359
- docElem.appendChild( el ).innerHTML = "<a id='" + expando + "'></a>" +
- "<select id='" + expando + "-\r\\' msallowcapture=''>" +
- "<option selected=''></option></select>";
-
- // Support: IE8, Opera 11-12.16
- // Nothing should be selected when empty strings follow ^= or $= or *=
- // The test attribute must be unknown in Opera but "safe" for WinRT
- // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section
- if ( el.querySelectorAll("[msallowcapture^='']").length ) {
- rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" );
- }
-
- // Support: IE8
- // Boolean attributes and "value" are not treated correctly
- if ( !el.querySelectorAll("[selected]").length ) {
- rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" );
- }
-
- // Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+
- if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) {
- rbuggyQSA.push("~=");
- }
-
- // Webkit/Opera - :checked should return selected option elements
- // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked
- // IE8 throws error here and will not see later tests
- if ( !el.querySelectorAll(":checked").length ) {
- rbuggyQSA.push(":checked");
- }
-
- // Support: Safari 8+, iOS 8+
- // https://bugs.webkit.org/show_bug.cgi?id=136851
- // In-page `selector#id sibling-combinator selector` fails
- if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) {
- rbuggyQSA.push(".#.+[+~]");
- }
- });
-
- assert(function( el ) {
- el.innerHTML = "<a href='' disabled='disabled'></a>" +
- "<select disabled='disabled'><option/></select>";
-
- // Support: Windows 8 Native Apps
- // The type and name attributes are restricted during .innerHTML assignment
- var input = document.createElement("input");
- input.setAttribute( "type", "hidden" );
- el.appendChild( input ).setAttribute( "name", "D" );
-
- // Support: IE8
- 
// Enforce case-sensitivity of name attribute - if ( el.querySelectorAll("[name=d]").length ) { - rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" ); - } - - // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) - // IE8 throws error here and will not see later tests - if ( el.querySelectorAll(":enabled").length !== 2 ) { - rbuggyQSA.push( ":enabled", ":disabled" ); - } - - // Support: IE9-11+ - // IE's :disabled selector does not pick up the children of disabled fieldsets - docElem.appendChild( el ).disabled = true; - if ( el.querySelectorAll(":disabled").length !== 2 ) { - rbuggyQSA.push( ":enabled", ":disabled" ); - } - - // Opera 10-11 does not throw on post-comma invalid pseudos - el.querySelectorAll("*,:x"); - rbuggyQSA.push(",.*:"); - }); - } - - if ( (support.matchesSelector = rnative.test( (matches = docElem.matches || - docElem.webkitMatchesSelector || - docElem.mozMatchesSelector || - docElem.oMatchesSelector || - docElem.msMatchesSelector) )) ) { - - assert(function( el ) { - // Check to see if it's possible to do matchesSelector - // on a disconnected node (IE 9) - support.disconnectedMatch = matches.call( el, "*" ); - - // This should fail with an exception - // Gecko does not error, returns false instead - matches.call( el, "[s!='']:x" ); - rbuggyMatches.push( "!=", pseudos ); - }); - } - - rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join("|") ); - rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join("|") ); - - /* Contains - ---------------------------------------------------------------------- */ - hasCompare = rnative.test( docElem.compareDocumentPosition ); - - // Element contains another - // Purposefully self-exclusive - // As in, an element does not contain itself - contains = hasCompare || rnative.test( docElem.contains ) ? - function( a, b ) { - var adown = a.nodeType === 9 ? a.documentElement : a, - bup = b && b.parentNode; - return a === bup || !!( bup && bup.nodeType === 1 && ( - adown.contains ? - adown.contains( bup ) : - a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 - )); - } : - function( a, b ) { - if ( b ) { - while ( (b = b.parentNode) ) { - if ( b === a ) { - return true; - } - } - } - return false; - }; - - /* Sorting - ---------------------------------------------------------------------- */ - - // Document order sorting - sortOrder = hasCompare ? - function( a, b ) { - - // Flag for duplicate removal - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - // Sort on method existence if only one input has compareDocumentPosition - var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; - if ( compare ) { - return compare; - } - - // Calculate position if both inputs belong to the same document - compare = ( a.ownerDocument || a ) === ( b.ownerDocument || b ) ? - a.compareDocumentPosition( b ) : - - // Otherwise we know they are disconnected - 1; - - // Disconnected nodes - if ( compare & 1 || - (!support.sortDetached && b.compareDocumentPosition( a ) === compare) ) { - - // Choose the first element that is related to our preferred document - if ( a === document || a.ownerDocument === preferredDoc && contains(preferredDoc, a) ) { - return -1; - } - if ( b === document || b.ownerDocument === preferredDoc && contains(preferredDoc, b) ) { - return 1; - } - - // Maintain original order - return sortInput ? - ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : - 0; - } - - return compare & 4 ? 
-1 : 1; - } : - function( a, b ) { - // Exit early if the nodes are identical - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - var cur, - i = 0, - aup = a.parentNode, - bup = b.parentNode, - ap = [ a ], - bp = [ b ]; - - // Parentless nodes are either documents or disconnected - if ( !aup || !bup ) { - return a === document ? -1 : - b === document ? 1 : - aup ? -1 : - bup ? 1 : - sortInput ? - ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : - 0; - - // If the nodes are siblings, we can do a quick check - } else if ( aup === bup ) { - return siblingCheck( a, b ); - } - - // Otherwise we need full lists of their ancestors for comparison - cur = a; - while ( (cur = cur.parentNode) ) { - ap.unshift( cur ); - } - cur = b; - while ( (cur = cur.parentNode) ) { - bp.unshift( cur ); - } - - // Walk down the tree looking for a discrepancy - while ( ap[i] === bp[i] ) { - i++; - } - - return i ? - // Do a sibling check if the nodes have a common ancestor - siblingCheck( ap[i], bp[i] ) : - - // Otherwise nodes in our document sort first - ap[i] === preferredDoc ? -1 : - bp[i] === preferredDoc ? 1 : - 0; - }; - - return document; -}; - -Sizzle.matches = function( expr, elements ) { - return Sizzle( expr, null, null, elements ); -}; - -Sizzle.matchesSelector = function( elem, expr ) { - // Set document vars if needed - if ( ( elem.ownerDocument || elem ) !== document ) { - setDocument( elem ); - } - - if ( support.matchesSelector && documentIsHTML && - !nonnativeSelectorCache[ expr + " " ] && - ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && - ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { - - try { - var ret = matches.call( elem, expr ); - - // IE 9's matchesSelector returns false on disconnected nodes - if ( ret || support.disconnectedMatch || - // As well, disconnected nodes are said to be in a document - // fragment in IE 9 - elem.document && elem.document.nodeType !== 11 ) { - return ret; - } - } catch (e) { - nonnativeSelectorCache( expr, true ); - } - } - - return Sizzle( expr, document, null, [ elem ] ).length > 0; -}; - -Sizzle.contains = function( context, elem ) { - // Set document vars if needed - if ( ( context.ownerDocument || context ) !== document ) { - setDocument( context ); - } - return contains( context, elem ); -}; - -Sizzle.attr = function( elem, name ) { - // Set document vars if needed - if ( ( elem.ownerDocument || elem ) !== document ) { - setDocument( elem ); - } - - var fn = Expr.attrHandle[ name.toLowerCase() ], - // Don't get fooled by Object.prototype properties (jQuery #13807) - val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? - fn( elem, name, !documentIsHTML ) : - undefined; - - return val !== undefined ? - val : - support.attributes || !documentIsHTML ? - elem.getAttribute( name ) : - (val = elem.getAttributeNode(name)) && val.specified ? 
- val.value : - null; -}; - -Sizzle.escape = function( sel ) { - return (sel + "").replace( rcssescape, fcssescape ); -}; - -Sizzle.error = function( msg ) { - throw new Error( "Syntax error, unrecognized expression: " + msg ); -}; - -/** - * Document sorting and removing duplicates - * @param {ArrayLike} results - */ -Sizzle.uniqueSort = function( results ) { - var elem, - duplicates = [], - j = 0, - i = 0; - - // Unless we *know* we can detect duplicates, assume their presence - hasDuplicate = !support.detectDuplicates; - sortInput = !support.sortStable && results.slice( 0 ); - results.sort( sortOrder ); - - if ( hasDuplicate ) { - while ( (elem = results[i++]) ) { - if ( elem === results[ i ] ) { - j = duplicates.push( i ); - } - } - while ( j-- ) { - results.splice( duplicates[ j ], 1 ); - } - } - - // Clear input after sorting to release objects - // See https://github.com/jquery/sizzle/pull/225 - sortInput = null; - - return results; -}; - -/** - * Utility function for retrieving the text value of an array of DOM nodes - * @param {Array|Element} elem - */ -getText = Sizzle.getText = function( elem ) { - var node, - ret = "", - i = 0, - nodeType = elem.nodeType; - - if ( !nodeType ) { - // If no nodeType, this is expected to be an array - while ( (node = elem[i++]) ) { - // Do not traverse comment nodes - ret += getText( node ); - } - } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { - // Use textContent for elements - // innerText usage removed for consistency of new lines (jQuery #11153) - if ( typeof elem.textContent === "string" ) { - return elem.textContent; - } else { - // Traverse its children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - ret += getText( elem ); - } - } - } else if ( nodeType === 3 || nodeType === 4 ) { - return elem.nodeValue; - } - // Do not include comment or processing instruction nodes - - return ret; -}; - -Expr = Sizzle.selectors = { - - // Can be adjusted by the user - cacheLength: 50, - - createPseudo: markFunction, - - match: matchExpr, - - attrHandle: {}, - - find: {}, - - relative: { - ">": { dir: "parentNode", first: true }, - " ": { dir: "parentNode" }, - "+": { dir: "previousSibling", first: true }, - "~": { dir: "previousSibling" } - }, - - preFilter: { - "ATTR": function( match ) { - match[1] = match[1].replace( runescape, funescape ); - - // Move the given value to match[3] whether quoted or unquoted - match[3] = ( match[3] || match[4] || match[5] || "" ).replace( runescape, funescape ); - - if ( match[2] === "~=" ) { - match[3] = " " + match[3] + " "; - } - - return match.slice( 0, 4 ); - }, - - "CHILD": function( match ) { - /* matches from matchExpr["CHILD"] - 1 type (only|nth|...) - 2 what (child|of-type) - 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) - 4 xn-component of xn+y argument ([+-]?\d*n|) - 5 sign of xn-component - 6 x of xn-component - 7 sign of y-component - 8 y of y-component - */ - match[1] = match[1].toLowerCase(); - - if ( match[1].slice( 0, 3 ) === "nth" ) { - // nth-* requires argument - if ( !match[3] ) { - Sizzle.error( match[0] ); - } - - // numeric x and y parameters for Expr.filter.CHILD - // remember that false/true cast respectively to 0/1 - match[4] = +( match[4] ? 
match[5] + (match[6] || 1) : 2 * ( match[3] === "even" || match[3] === "odd" ) ); - match[5] = +( ( match[7] + match[8] ) || match[3] === "odd" ); - - // other types prohibit arguments - } else if ( match[3] ) { - Sizzle.error( match[0] ); - } - - return match; - }, - - "PSEUDO": function( match ) { - var excess, - unquoted = !match[6] && match[2]; - - if ( matchExpr["CHILD"].test( match[0] ) ) { - return null; - } - - // Accept quoted arguments as-is - if ( match[3] ) { - match[2] = match[4] || match[5] || ""; - - // Strip excess characters from unquoted arguments - } else if ( unquoted && rpseudo.test( unquoted ) && - // Get excess from tokenize (recursively) - (excess = tokenize( unquoted, true )) && - // advance to the next closing parenthesis - (excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length) ) { - - // excess is a negative index - match[0] = match[0].slice( 0, excess ); - match[2] = unquoted.slice( 0, excess ); - } - - // Return only captures needed by the pseudo filter method (type and argument) - return match.slice( 0, 3 ); - } - }, - - filter: { - - "TAG": function( nodeNameSelector ) { - var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); - return nodeNameSelector === "*" ? - function() { return true; } : - function( elem ) { - return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; - }; - }, - - "CLASS": function( className ) { - var pattern = classCache[ className + " " ]; - - return pattern || - (pattern = new RegExp( "(^|" + whitespace + ")" + className + "(" + whitespace + "|$)" )) && - classCache( className, function( elem ) { - return pattern.test( typeof elem.className === "string" && elem.className || typeof elem.getAttribute !== "undefined" && elem.getAttribute("class") || "" ); - }); - }, - - "ATTR": function( name, operator, check ) { - return function( elem ) { - var result = Sizzle.attr( elem, name ); - - if ( result == null ) { - return operator === "!="; - } - if ( !operator ) { - return true; - } - - result += ""; - - return operator === "=" ? result === check : - operator === "!=" ? result !== check : - operator === "^=" ? check && result.indexOf( check ) === 0 : - operator === "*=" ? check && result.indexOf( check ) > -1 : - operator === "$=" ? check && result.slice( -check.length ) === check : - operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : - operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : - false; - }; - }, - - "CHILD": function( type, what, argument, first, last ) { - var simple = type.slice( 0, 3 ) !== "nth", - forward = type.slice( -4 ) !== "last", - ofType = what === "of-type"; - - return first === 1 && last === 0 ? - - // Shortcut for :nth-*(n) - function( elem ) { - return !!elem.parentNode; - } : - - function( elem, context, xml ) { - var cache, uniqueCache, outerCache, node, nodeIndex, start, - dir = simple !== forward ? "nextSibling" : "previousSibling", - parent = elem.parentNode, - name = ofType && elem.nodeName.toLowerCase(), - useCache = !xml && !ofType, - diff = false; - - if ( parent ) { - - // :(first|last|only)-(child|of-type) - if ( simple ) { - while ( dir ) { - node = elem; - while ( (node = node[ dir ]) ) { - if ( ofType ? 
- node.nodeName.toLowerCase() === name : - node.nodeType === 1 ) { - - return false; - } - } - // Reverse direction for :only-* (if we haven't yet done so) - start = dir = type === "only" && !start && "nextSibling"; - } - return true; - } - - start = [ forward ? parent.firstChild : parent.lastChild ]; - - // non-xml :nth-child(...) stores cache data on `parent` - if ( forward && useCache ) { - - // Seek `elem` from a previously-cached index - - // ...in a gzip-friendly way - node = parent; - outerCache = node[ expando ] || (node[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - (outerCache[ node.uniqueID ] = {}); - - cache = uniqueCache[ type ] || []; - nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; - diff = nodeIndex && cache[ 2 ]; - node = nodeIndex && parent.childNodes[ nodeIndex ]; - - while ( (node = ++nodeIndex && node && node[ dir ] || - - // Fallback to seeking `elem` from the start - (diff = nodeIndex = 0) || start.pop()) ) { - - // When found, cache indexes on `parent` and break - if ( node.nodeType === 1 && ++diff && node === elem ) { - uniqueCache[ type ] = [ dirruns, nodeIndex, diff ]; - break; - } - } - - } else { - // Use previously-cached element index if available - if ( useCache ) { - // ...in a gzip-friendly way - node = elem; - outerCache = node[ expando ] || (node[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - (outerCache[ node.uniqueID ] = {}); - - cache = uniqueCache[ type ] || []; - nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; - diff = nodeIndex; - } - - // xml :nth-child(...) - // or :nth-last-child(...) or :nth(-last)?-of-type(...) - if ( diff === false ) { - // Use the same loop as above to seek `elem` from the start - while ( (node = ++nodeIndex && node && node[ dir ] || - (diff = nodeIndex = 0) || start.pop()) ) { - - if ( ( ofType ? - node.nodeName.toLowerCase() === name : - node.nodeType === 1 ) && - ++diff ) { - - // Cache the index of each encountered element - if ( useCache ) { - outerCache = node[ expando ] || (node[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - (outerCache[ node.uniqueID ] = {}); - - uniqueCache[ type ] = [ dirruns, diff ]; - } - - if ( node === elem ) { - break; - } - } - } - } - } - - // Incorporate the offset, then check against cycle size - diff -= last; - return diff === first || ( diff % first === 0 && diff / first >= 0 ); - } - }; - }, - - "PSEUDO": function( pseudo, argument ) { - // pseudo-class names are case-insensitive - // http://www.w3.org/TR/selectors/#pseudo-classes - // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters - // Remember that setFilters inherits from pseudos - var args, - fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || - Sizzle.error( "unsupported pseudo: " + pseudo ); - - // The user may use createPseudo to indicate that - // arguments are needed to create the filter function - // just as Sizzle does - if ( fn[ expando ] ) { - return fn( argument ); - } - - // But maintain support for old signatures - if ( fn.length > 1 ) { - args = [ pseudo, pseudo, "", argument ]; - return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? 
- markFunction(function( seed, matches ) { - var idx, - matched = fn( seed, argument ), - i = matched.length; - while ( i-- ) { - idx = indexOf( seed, matched[i] ); - seed[ idx ] = !( matches[ idx ] = matched[i] ); - } - }) : - function( elem ) { - return fn( elem, 0, args ); - }; - } - - return fn; - } - }, - - pseudos: { - // Potentially complex pseudos - "not": markFunction(function( selector ) { - // Trim the selector passed to compile - // to avoid treating leading and trailing - // spaces as combinators - var input = [], - results = [], - matcher = compile( selector.replace( rtrim, "$1" ) ); - - return matcher[ expando ] ? - markFunction(function( seed, matches, context, xml ) { - var elem, - unmatched = matcher( seed, null, xml, [] ), - i = seed.length; - - // Match elements unmatched by `matcher` - while ( i-- ) { - if ( (elem = unmatched[i]) ) { - seed[i] = !(matches[i] = elem); - } - } - }) : - function( elem, context, xml ) { - input[0] = elem; - matcher( input, null, xml, results ); - // Don't keep the element (issue #299) - input[0] = null; - return !results.pop(); - }; - }), - - "has": markFunction(function( selector ) { - return function( elem ) { - return Sizzle( selector, elem ).length > 0; - }; - }), - - "contains": markFunction(function( text ) { - text = text.replace( runescape, funescape ); - return function( elem ) { - return ( elem.textContent || getText( elem ) ).indexOf( text ) > -1; - }; - }), - - // "Whether an element is represented by a :lang() selector - // is based solely on the element's language value - // being equal to the identifier C, - // or beginning with the identifier C immediately followed by "-". - // The matching of C against the element's language value is performed case-insensitively. - // The identifier C does not have to be a valid language name." - // http://www.w3.org/TR/selectors/#lang-pseudo - "lang": markFunction( function( lang ) { - // lang value must be a valid identifier - if ( !ridentifier.test(lang || "") ) { - Sizzle.error( "unsupported lang: " + lang ); - } - lang = lang.replace( runescape, funescape ).toLowerCase(); - return function( elem ) { - var elemLang; - do { - if ( (elemLang = documentIsHTML ? 
- elem.lang : - elem.getAttribute("xml:lang") || elem.getAttribute("lang")) ) { - - elemLang = elemLang.toLowerCase(); - return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; - } - } while ( (elem = elem.parentNode) && elem.nodeType === 1 ); - return false; - }; - }), - - // Miscellaneous - "target": function( elem ) { - var hash = window.location && window.location.hash; - return hash && hash.slice( 1 ) === elem.id; - }, - - "root": function( elem ) { - return elem === docElem; - }, - - "focus": function( elem ) { - return elem === document.activeElement && (!document.hasFocus || document.hasFocus()) && !!(elem.type || elem.href || ~elem.tabIndex); - }, - - // Boolean properties - "enabled": createDisabledPseudo( false ), - "disabled": createDisabledPseudo( true ), - - "checked": function( elem ) { - // In CSS3, :checked should return both checked and selected elements - // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - var nodeName = elem.nodeName.toLowerCase(); - return (nodeName === "input" && !!elem.checked) || (nodeName === "option" && !!elem.selected); - }, - - "selected": function( elem ) { - // Accessing this property makes selected-by-default - // options in Safari work properly - if ( elem.parentNode ) { - elem.parentNode.selectedIndex; - } - - return elem.selected === true; - }, - - // Contents - "empty": function( elem ) { - // http://www.w3.org/TR/selectors/#empty-pseudo - // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), - // but not by others (comment: 8; processing instruction: 7; etc.) - // nodeType < 6 works because attributes (2) do not appear as children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - if ( elem.nodeType < 6 ) { - return false; - } - } - return true; - }, - - "parent": function( elem ) { - return !Expr.pseudos["empty"]( elem ); - }, - - // Element/input types - "header": function( elem ) { - return rheader.test( elem.nodeName ); - }, - - "input": function( elem ) { - return rinputs.test( elem.nodeName ); - }, - - "button": function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === "button" || name === "button"; - }, - - "text": function( elem ) { - var attr; - return elem.nodeName.toLowerCase() === "input" && - elem.type === "text" && - - // Support: IE<8 - // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" - ( (attr = elem.getAttribute("type")) == null || attr.toLowerCase() === "text" ); - }, - - // Position-in-collection - "first": createPositionalPseudo(function() { - return [ 0 ]; - }), - - "last": createPositionalPseudo(function( matchIndexes, length ) { - return [ length - 1 ]; - }), - - "eq": createPositionalPseudo(function( matchIndexes, length, argument ) { - return [ argument < 0 ? argument + length : argument ]; - }), - - "even": createPositionalPseudo(function( matchIndexes, length ) { - var i = 0; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "odd": createPositionalPseudo(function( matchIndexes, length ) { - var i = 1; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "lt": createPositionalPseudo(function( matchIndexes, length, argument ) { - var i = argument < 0 ? - argument + length : - argument > length ? 
- length : - argument; - for ( ; --i >= 0; ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "gt": createPositionalPseudo(function( matchIndexes, length, argument ) { - var i = argument < 0 ? argument + length : argument; - for ( ; ++i < length; ) { - matchIndexes.push( i ); - } - return matchIndexes; - }) - } -}; - -Expr.pseudos["nth"] = Expr.pseudos["eq"]; - -// Add button/input type pseudos -for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { - Expr.pseudos[ i ] = createInputPseudo( i ); -} -for ( i in { submit: true, reset: true } ) { - Expr.pseudos[ i ] = createButtonPseudo( i ); -} - -// Easy API for creating new setFilters -function setFilters() {} -setFilters.prototype = Expr.filters = Expr.pseudos; -Expr.setFilters = new setFilters(); - -tokenize = Sizzle.tokenize = function( selector, parseOnly ) { - var matched, match, tokens, type, - soFar, groups, preFilters, - cached = tokenCache[ selector + " " ]; - - if ( cached ) { - return parseOnly ? 0 : cached.slice( 0 ); - } - - soFar = selector; - groups = []; - preFilters = Expr.preFilter; - - while ( soFar ) { - - // Comma and first run - if ( !matched || (match = rcomma.exec( soFar )) ) { - if ( match ) { - // Don't consume trailing commas as valid - soFar = soFar.slice( match[0].length ) || soFar; - } - groups.push( (tokens = []) ); - } - - matched = false; - - // Combinators - if ( (match = rcombinators.exec( soFar )) ) { - matched = match.shift(); - tokens.push({ - value: matched, - // Cast descendant combinators to space - type: match[0].replace( rtrim, " " ) - }); - soFar = soFar.slice( matched.length ); - } - - // Filters - for ( type in Expr.filter ) { - if ( (match = matchExpr[ type ].exec( soFar )) && (!preFilters[ type ] || - (match = preFilters[ type ]( match ))) ) { - matched = match.shift(); - tokens.push({ - value: matched, - type: type, - matches: match - }); - soFar = soFar.slice( matched.length ); - } - } - - if ( !matched ) { - break; - } - } - - // Return the length of the invalid excess - // if we're just parsing - // Otherwise, throw an error or return tokens - return parseOnly ? - soFar.length : - soFar ? - Sizzle.error( selector ) : - // Cache the tokens - tokenCache( selector, groups ).slice( 0 ); -}; - -function toSelector( tokens ) { - var i = 0, - len = tokens.length, - selector = ""; - for ( ; i < len; i++ ) { - selector += tokens[i].value; - } - return selector; -} - -function addCombinator( matcher, combinator, base ) { - var dir = combinator.dir, - skip = combinator.next, - key = skip || dir, - checkNonElements = base && key === "parentNode", - doneName = done++; - - return combinator.first ? 
- // Check against closest ancestor/preceding element - function( elem, context, xml ) { - while ( (elem = elem[ dir ]) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - return matcher( elem, context, xml ); - } - } - return false; - } : - - // Check against all ancestor/preceding elements - function( elem, context, xml ) { - var oldCache, uniqueCache, outerCache, - newCache = [ dirruns, doneName ]; - - // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching - if ( xml ) { - while ( (elem = elem[ dir ]) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - if ( matcher( elem, context, xml ) ) { - return true; - } - } - } - } else { - while ( (elem = elem[ dir ]) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - outerCache = elem[ expando ] || (elem[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ elem.uniqueID ] || (outerCache[ elem.uniqueID ] = {}); - - if ( skip && skip === elem.nodeName.toLowerCase() ) { - elem = elem[ dir ] || elem; - } else if ( (oldCache = uniqueCache[ key ]) && - oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { - - // Assign to newCache so results back-propagate to previous elements - return (newCache[ 2 ] = oldCache[ 2 ]); - } else { - // Reuse newcache so results back-propagate to previous elements - uniqueCache[ key ] = newCache; - - // A match means we're done; a fail means we have to keep checking - if ( (newCache[ 2 ] = matcher( elem, context, xml )) ) { - return true; - } - } - } - } - } - return false; - }; -} - -function elementMatcher( matchers ) { - return matchers.length > 1 ? - function( elem, context, xml ) { - var i = matchers.length; - while ( i-- ) { - if ( !matchers[i]( elem, context, xml ) ) { - return false; - } - } - return true; - } : - matchers[0]; -} - -function multipleContexts( selector, contexts, results ) { - var i = 0, - len = contexts.length; - for ( ; i < len; i++ ) { - Sizzle( selector, contexts[i], results ); - } - return results; -} - -function condense( unmatched, map, filter, context, xml ) { - var elem, - newUnmatched = [], - i = 0, - len = unmatched.length, - mapped = map != null; - - for ( ; i < len; i++ ) { - if ( (elem = unmatched[i]) ) { - if ( !filter || filter( elem, context, xml ) ) { - newUnmatched.push( elem ); - if ( mapped ) { - map.push( i ); - } - } - } - } - - return newUnmatched; -} - -function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { - if ( postFilter && !postFilter[ expando ] ) { - postFilter = setMatcher( postFilter ); - } - if ( postFinder && !postFinder[ expando ] ) { - postFinder = setMatcher( postFinder, postSelector ); - } - return markFunction(function( seed, results, context, xml ) { - var temp, i, elem, - preMap = [], - postMap = [], - preexisting = results.length, - - // Get initial elements from seed or context - elems = seed || multipleContexts( selector || "*", context.nodeType ? [ context ] : context, [] ), - - // Prefilter to get matcher input, preserving a map for seed-results synchronization - matcherIn = preFilter && ( seed || !selector ) ? - condense( elems, preMap, preFilter, context, xml ) : - elems, - - matcherOut = matcher ? - // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, - postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
- - // ...intermediate processing is necessary - [] : - - // ...otherwise use results directly - results : - matcherIn; - - // Find primary matches - if ( matcher ) { - matcher( matcherIn, matcherOut, context, xml ); - } - - // Apply postFilter - if ( postFilter ) { - temp = condense( matcherOut, postMap ); - postFilter( temp, [], context, xml ); - - // Un-match failing elements by moving them back to matcherIn - i = temp.length; - while ( i-- ) { - if ( (elem = temp[i]) ) { - matcherOut[ postMap[i] ] = !(matcherIn[ postMap[i] ] = elem); - } - } - } - - if ( seed ) { - if ( postFinder || preFilter ) { - if ( postFinder ) { - // Get the final matcherOut by condensing this intermediate into postFinder contexts - temp = []; - i = matcherOut.length; - while ( i-- ) { - if ( (elem = matcherOut[i]) ) { - // Restore matcherIn since elem is not yet a final match - temp.push( (matcherIn[i] = elem) ); - } - } - postFinder( null, (matcherOut = []), temp, xml ); - } - - // Move matched elements from seed to results to keep them synchronized - i = matcherOut.length; - while ( i-- ) { - if ( (elem = matcherOut[i]) && - (temp = postFinder ? indexOf( seed, elem ) : preMap[i]) > -1 ) { - - seed[temp] = !(results[temp] = elem); - } - } - } - - // Add elements to results, through postFinder if defined - } else { - matcherOut = condense( - matcherOut === results ? - matcherOut.splice( preexisting, matcherOut.length ) : - matcherOut - ); - if ( postFinder ) { - postFinder( null, results, matcherOut, xml ); - } else { - push.apply( results, matcherOut ); - } - } - }); -} - -function matcherFromTokens( tokens ) { - var checkContext, matcher, j, - len = tokens.length, - leadingRelative = Expr.relative[ tokens[0].type ], - implicitRelative = leadingRelative || Expr.relative[" "], - i = leadingRelative ? 1 : 0, - - // The foundational matcher ensures that elements are reachable from top-level context(s) - matchContext = addCombinator( function( elem ) { - return elem === checkContext; - }, implicitRelative, true ), - matchAnyContext = addCombinator( function( elem ) { - return indexOf( checkContext, elem ) > -1; - }, implicitRelative, true ), - matchers = [ function( elem, context, xml ) { - var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( - (checkContext = context).nodeType ? - matchContext( elem, context, xml ) : - matchAnyContext( elem, context, xml ) ); - // Avoid hanging onto element (issue #299) - checkContext = null; - return ret; - } ]; - - for ( ; i < len; i++ ) { - if ( (matcher = Expr.relative[ tokens[i].type ]) ) { - matchers = [ addCombinator(elementMatcher( matchers ), matcher) ]; - } else { - matcher = Expr.filter[ tokens[i].type ].apply( null, tokens[i].matches ); - - // Return special upon seeing a positional matcher - if ( matcher[ expando ] ) { - // Find the next relative operator (if any) for proper handling - j = ++i; - for ( ; j < len; j++ ) { - if ( Expr.relative[ tokens[j].type ] ) { - break; - } - } - return setMatcher( - i > 1 && elementMatcher( matchers ), - i > 1 && toSelector( - // If the preceding token was a descendant combinator, insert an implicit any-element `*` - tokens.slice( 0, i - 1 ).concat({ value: tokens[ i - 2 ].type === " " ? 
"*" : "" }) - ).replace( rtrim, "$1" ), - matcher, - i < j && matcherFromTokens( tokens.slice( i, j ) ), - j < len && matcherFromTokens( (tokens = tokens.slice( j )) ), - j < len && toSelector( tokens ) - ); - } - matchers.push( matcher ); - } - } - - return elementMatcher( matchers ); -} - -function matcherFromGroupMatchers( elementMatchers, setMatchers ) { - var bySet = setMatchers.length > 0, - byElement = elementMatchers.length > 0, - superMatcher = function( seed, context, xml, results, outermost ) { - var elem, j, matcher, - matchedCount = 0, - i = "0", - unmatched = seed && [], - setMatched = [], - contextBackup = outermostContext, - // We must always have either seed elements or outermost context - elems = seed || byElement && Expr.find["TAG"]( "*", outermost ), - // Use integer dirruns iff this is the outermost matcher - dirrunsUnique = (dirruns += contextBackup == null ? 1 : Math.random() || 0.1), - len = elems.length; - - if ( outermost ) { - outermostContext = context === document || context || outermost; - } - - // Add elements passing elementMatchers directly to results - // Support: IE<9, Safari - // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id - for ( ; i !== len && (elem = elems[i]) != null; i++ ) { - if ( byElement && elem ) { - j = 0; - if ( !context && elem.ownerDocument !== document ) { - setDocument( elem ); - xml = !documentIsHTML; - } - while ( (matcher = elementMatchers[j++]) ) { - if ( matcher( elem, context || document, xml) ) { - results.push( elem ); - break; - } - } - if ( outermost ) { - dirruns = dirrunsUnique; - } - } - - // Track unmatched elements for set filters - if ( bySet ) { - // They will have gone through all possible matchers - if ( (elem = !matcher && elem) ) { - matchedCount--; - } - - // Lengthen the array for every element, matched or not - if ( seed ) { - unmatched.push( elem ); - } - } - } - - // `i` is now the count of elements visited above, and adding it to `matchedCount` - // makes the latter nonnegative. - matchedCount += i; - - // Apply set filters to unmatched elements - // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount` - // equals `i`), unless we didn't visit _any_ elements in the above loop because we have - // no element matchers and no seed. - // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that - // case, which will result in a "00" `matchedCount` that differs from `i` but is also - // numerically zero. - if ( bySet && i !== matchedCount ) { - j = 0; - while ( (matcher = setMatchers[j++]) ) { - matcher( unmatched, setMatched, context, xml ); - } - - if ( seed ) { - // Reintegrate element matches to eliminate the need for sorting - if ( matchedCount > 0 ) { - while ( i-- ) { - if ( !(unmatched[i] || setMatched[i]) ) { - setMatched[i] = pop.call( results ); - } - } - } - - // Discard index placeholder values to get only actual matches - setMatched = condense( setMatched ); - } - - // Add matches to results - push.apply( results, setMatched ); - - // Seedless set matches succeeding multiple successful matchers stipulate sorting - if ( outermost && !seed && setMatched.length > 0 && - ( matchedCount + setMatchers.length ) > 1 ) { - - Sizzle.uniqueSort( results ); - } - } - - // Override manipulation of globals by nested matchers - if ( outermost ) { - dirruns = dirrunsUnique; - outermostContext = contextBackup; - } - - return unmatched; - }; - - return bySet ? 
- markFunction( superMatcher ) : - superMatcher; -} - -compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { - var i, - setMatchers = [], - elementMatchers = [], - cached = compilerCache[ selector + " " ]; - - if ( !cached ) { - // Generate a function of recursive functions that can be used to check each element - if ( !match ) { - match = tokenize( selector ); - } - i = match.length; - while ( i-- ) { - cached = matcherFromTokens( match[i] ); - if ( cached[ expando ] ) { - setMatchers.push( cached ); - } else { - elementMatchers.push( cached ); - } - } - - // Cache the compiled function - cached = compilerCache( selector, matcherFromGroupMatchers( elementMatchers, setMatchers ) ); - - // Save selector and tokenization - cached.selector = selector; - } - return cached; -}; - -/** - * A low-level selection function that works with Sizzle's compiled - * selector functions - * @param {String|Function} selector A selector or a pre-compiled - * selector function built with Sizzle.compile - * @param {Element} context - * @param {Array} [results] - * @param {Array} [seed] A set of elements to match against - */ -select = Sizzle.select = function( selector, context, results, seed ) { - var i, tokens, token, type, find, - compiled = typeof selector === "function" && selector, - match = !seed && tokenize( (selector = compiled.selector || selector) ); - - results = results || []; - - // Try to minimize operations if there is only one selector in the list and no seed - // (the latter of which guarantees us context) - if ( match.length === 1 ) { - - // Reduce context if the leading compound selector is an ID - tokens = match[0] = match[0].slice( 0 ); - if ( tokens.length > 2 && (token = tokens[0]).type === "ID" && - context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[1].type ] ) { - - context = ( Expr.find["ID"]( token.matches[0].replace(runescape, funescape), context ) || [] )[0]; - if ( !context ) { - return results; - - // Precompiled matchers will still verify ancestry, so step up a level - } else if ( compiled ) { - context = context.parentNode; - } - - selector = selector.slice( tokens.shift().value.length ); - } - - // Fetch a seed set for right-to-left matching - i = matchExpr["needsContext"].test( selector ) ? 
0 : tokens.length; - while ( i-- ) { - token = tokens[i]; - - // Abort if we hit a combinator - if ( Expr.relative[ (type = token.type) ] ) { - break; - } - if ( (find = Expr.find[ type ]) ) { - // Search, expanding context for leading sibling combinators - if ( (seed = find( - token.matches[0].replace( runescape, funescape ), - rsibling.test( tokens[0].type ) && testContext( context.parentNode ) || context - )) ) { - - // If seed is empty or no tokens remain, we can return early - tokens.splice( i, 1 ); - selector = seed.length && toSelector( tokens ); - if ( !selector ) { - push.apply( results, seed ); - return results; - } - - break; - } - } - } - } - - // Compile and execute a filtering function if one is not provided - // Provide `match` to avoid retokenization if we modified the selector above - ( compiled || compile( selector, match ) )( - seed, - context, - !documentIsHTML, - results, - !context || rsibling.test( selector ) && testContext( context.parentNode ) || context - ); - return results; -}; - -// One-time assignments - -// Sort stability -support.sortStable = expando.split("").sort( sortOrder ).join("") === expando; - -// Support: Chrome 14-35+ -// Always assume duplicates if they aren't passed to the comparison function -support.detectDuplicates = !!hasDuplicate; - -// Initialize against the default document -setDocument(); - -// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27) -// Detached nodes confoundingly follow *each other* -support.sortDetached = assert(function( el ) { - // Should return 1, but returns 4 (following) - return el.compareDocumentPosition( document.createElement("fieldset") ) & 1; -}); - -// Support: IE<8 -// Prevent attribute/property "interpolation" -// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx -if ( !assert(function( el ) { - el.innerHTML = "<a href='#'></a>"; - return el.firstChild.getAttribute("href") === "#" ; -}) ) { - addHandle( "type|href|height|width", function( elem, name, isXML ) { - if ( !isXML ) { - return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 ); - } - }); -} - -// Support: IE<9 -// Use defaultValue in place of getAttribute("value") -if ( !support.attributes || !assert(function( el ) { - el.innerHTML = "<input/>"; - el.firstChild.setAttribute( "value", "" ); - return el.firstChild.getAttribute( "value" ) === ""; -}) ) { - addHandle( "value", function( elem, name, isXML ) { - if ( !isXML && elem.nodeName.toLowerCase() === "input" ) { - return elem.defaultValue; - } - }); -} - -// Support: IE<9 -// Use getAttributeNode to fetch booleans when getAttribute lies -if ( !assert(function( el ) { - return el.getAttribute("disabled") == null; -}) ) { - addHandle( booleans, function( elem, name, isXML ) { - var val; - if ( !isXML ) { - return elem[ name ] === true ? name.toLowerCase() : - (val = elem.getAttributeNode( name )) && val.specified ?
- val.value : - null; - } - }); -} - -return Sizzle; - -})( window ); - - - -jQuery.find = Sizzle; -jQuery.expr = Sizzle.selectors; - -// Deprecated -jQuery.expr[ ":" ] = jQuery.expr.pseudos; -jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort; -jQuery.text = Sizzle.getText; -jQuery.isXMLDoc = Sizzle.isXML; -jQuery.contains = Sizzle.contains; -jQuery.escapeSelector = Sizzle.escape; - - - - -var dir = function( elem, dir, until ) { - var matched = [], - truncate = until !== undefined; - - while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) { - if ( elem.nodeType === 1 ) { - if ( truncate && jQuery( elem ).is( until ) ) { - break; - } - matched.push( elem ); - } - } - return matched; -}; - - -var siblings = function( n, elem ) { - var matched = []; - - for ( ; n; n = n.nextSibling ) { - if ( n.nodeType === 1 && n !== elem ) { - matched.push( n ); - } - } - - return matched; -}; - - -var rneedsContext = jQuery.expr.match.needsContext; - - - -function nodeName( elem, name ) { - - return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); - -}; -var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i ); - - - -// Implement the identical functionality for filter and not -function winnow( elements, qualifier, not ) { - if ( isFunction( qualifier ) ) { - return jQuery.grep( elements, function( elem, i ) { - return !!qualifier.call( elem, i, elem ) !== not; - } ); - } - - // Single element - if ( qualifier.nodeType ) { - return jQuery.grep( elements, function( elem ) { - return ( elem === qualifier ) !== not; - } ); - } - - // Arraylike of elements (jQuery, arguments, Array) - if ( typeof qualifier !== "string" ) { - return jQuery.grep( elements, function( elem ) { - return ( indexOf.call( qualifier, elem ) > -1 ) !== not; - } ); - } - - // Filtered directly for both simple and complex selectors - return jQuery.filter( qualifier, elements, not ); -} - -jQuery.filter = function( expr, elems, not ) { - var elem = elems[ 0 ]; - - if ( not ) { - expr = ":not(" + expr + ")"; - } - - if ( elems.length === 1 && elem.nodeType === 1 ) { - return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : []; - } - - return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { - return elem.nodeType === 1; - } ) ); -}; - -jQuery.fn.extend( { - find: function( selector ) { - var i, ret, - len = this.length, - self = this; - - if ( typeof selector !== "string" ) { - return this.pushStack( jQuery( selector ).filter( function() { - for ( i = 0; i < len; i++ ) { - if ( jQuery.contains( self[ i ], this ) ) { - return true; - } - } - } ) ); - } - - ret = this.pushStack( [] ); - - for ( i = 0; i < len; i++ ) { - jQuery.find( selector, self[ i ], ret ); - } - - return len > 1 ? jQuery.uniqueSort( ret ) : ret; - }, - filter: function( selector ) { - return this.pushStack( winnow( this, selector || [], false ) ); - }, - not: function( selector ) { - return this.pushStack( winnow( this, selector || [], true ) ); - }, - is: function( selector ) { - return !!winnow( - this, - - // If this is a positional/relative selector, check membership in the returned set - // so $("p:first").is("p:last") won't return true for a doc with two "p". - typeof selector === "string" && rneedsContext.test( selector ) ? 
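// The three public faces of winnow(), sketched under the assumption that the
// page contains <ul><li class="a">one</li><li>two</li></ul>:
var items = jQuery( "li" );
items.filter( ".a" ).length;                    // 1 -- keep qualifying elements
items.not( ".a" ).length;                       // 1 -- drop qualifying elements
items.is( function( i ) { return i === 0; } );  // true -- does any element qualify?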
- jQuery( selector ) : - selector || [], - false - ).length; - } -} ); - - -// Initialize a jQuery object - - -// A central reference to the root jQuery(document) -var rootjQuery, - - // A simple way to check for HTML strings - // Prioritize #id over <tag> to avoid XSS via location.hash (#9521) - // Strict HTML recognition (#11290: must start with <) - // Shortcut simple #id case for speed - rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/, - - init = jQuery.fn.init = function( selector, context, root ) { - var match, elem; - - // HANDLE: $(""), $(null), $(undefined), $(false) - if ( !selector ) { - return this; - } - - // Method init() accepts an alternate rootjQuery - // so migrate can support jQuery.sub (gh-2101) - root = root || rootjQuery; - - // Handle HTML strings - if ( typeof selector === "string" ) { - if ( selector[ 0 ] === "<" && - selector[ selector.length - 1 ] === ">" && - selector.length >= 3 ) { - - // Assume that strings that start and end with <> are HTML and skip the regex check - match = [ null, selector, null ]; - - } else { - match = rquickExpr.exec( selector ); - } - - // Match html or make sure no context is specified for #id - if ( match && ( match[ 1 ] || !context ) ) { - - // HANDLE: $(html) -> $(array) - if ( match[ 1 ] ) { - context = context instanceof jQuery ? context[ 0 ] : context; - - // Option to run scripts is true for back-compat - // Intentionally let the error be thrown if parseHTML is not present - jQuery.merge( this, jQuery.parseHTML( - match[ 1 ], - context && context.nodeType ? context.ownerDocument || context : document, - true - ) ); - - // HANDLE: $(html, props) - if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) { - for ( match in context ) { - - // Properties of context are called as methods if possible - if ( isFunction( this[ match ] ) ) { - this[ match ]( context[ match ] ); - - // ...and otherwise set as attributes - } else { - this.attr( match, context[ match ] ); - } - } - } - - return this; - - // HANDLE: $(#id) - } else { - elem = document.getElementById( match[ 2 ] ); - - if ( elem ) { - - // Inject the element directly into the jQuery object - this[ 0 ] = elem; - this.length = 1; - } - return this; - } - - // HANDLE: $(expr, $(...)) - } else if ( !context || context.jquery ) { - return ( context || root ).find( selector ); - - // HANDLE: $(expr, context) - // (which is just equivalent to: $(context).find(expr) - } else { - return this.constructor( context ).find( selector ); - } - - // HANDLE: $(DOMElement) - } else if ( selector.nodeType ) { - this[ 0 ] = selector; - this.length = 1; - return this; - - // HANDLE: $(function) - // Shortcut for document ready - } else if ( isFunction( selector ) ) { - return root.ready !== undefined ?
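// The init() branches above, one call per branch; the selectors and ids here
// are illustrative assumptions, not part of the library:
jQuery( "<p>hi</p>" );                        // HTML string -> parseHTML path
jQuery( "<p></p>", { id: "x" } );             // single tag + props object
jQuery( "#main" );                            // plain #id -> getElementById fast path
jQuery( document.body );                      // DOMElement -> wrapped directly
jQuery( function( $ ) { /* DOM ready */ } );  // function -> ready shortcut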
- root.ready( selector ) : - - // Execute immediately if ready is not present - selector( jQuery ); - } - - return jQuery.makeArray( selector, this ); - }; - -// Give the init function the jQuery prototype for later instantiation -init.prototype = jQuery.fn; - -// Initialize central reference -rootjQuery = jQuery( document ); - - -var rparentsprev = /^(?:parents|prev(?:Until|All))/, - - // Methods guaranteed to produce a unique set when starting from a unique set - guaranteedUnique = { - children: true, - contents: true, - next: true, - prev: true - }; - -jQuery.fn.extend( { - has: function( target ) { - var targets = jQuery( target, this ), - l = targets.length; - - return this.filter( function() { - var i = 0; - for ( ; i < l; i++ ) { - if ( jQuery.contains( this, targets[ i ] ) ) { - return true; - } - } - } ); - }, - - closest: function( selectors, context ) { - var cur, - i = 0, - l = this.length, - matched = [], - targets = typeof selectors !== "string" && jQuery( selectors ); - - // Positional selectors never match, since there's no _selection_ context - if ( !rneedsContext.test( selectors ) ) { - for ( ; i < l; i++ ) { - for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) { - - // Always skip document fragments - if ( cur.nodeType < 11 && ( targets ? - targets.index( cur ) > -1 : - - // Don't pass non-elements to Sizzle - cur.nodeType === 1 && - jQuery.find.matchesSelector( cur, selectors ) ) ) { - - matched.push( cur ); - break; - } - } - } - } - - return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched ); - }, - - // Determine the position of an element within the set - index: function( elem ) { - - // No argument, return index in parent - if ( !elem ) { - return ( this[ 0 ] && this[ 0 ].parentNode ) ? this.first().prevAll().length : -1; - } - - // Index in selector - if ( typeof elem === "string" ) { - return indexOf.call( jQuery( elem ), this[ 0 ] ); - } - - // Locate the position of the desired element - return indexOf.call( this, - - // If it receives a jQuery object, the first element is used - elem.jquery ? elem[ 0 ] : elem - ); - }, - - add: function( selector, context ) { - return this.pushStack( - jQuery.uniqueSort( - jQuery.merge( this.get(), jQuery( selector, context ) ) - ) - ); - }, - - addBack: function( selector ) { - return this.add( selector == null ? - this.prevObject : this.prevObject.filter( selector ) - ); - } -} ); - -function sibling( cur, dir ) { - while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {} - return cur; -} - -jQuery.each( { - parent: function( elem ) { - var parent = elem.parentNode; - return parent && parent.nodeType !== 11 ? 
parent : null; - }, - parents: function( elem ) { - return dir( elem, "parentNode" ); - }, - parentsUntil: function( elem, i, until ) { - return dir( elem, "parentNode", until ); - }, - next: function( elem ) { - return sibling( elem, "nextSibling" ); - }, - prev: function( elem ) { - return sibling( elem, "previousSibling" ); - }, - nextAll: function( elem ) { - return dir( elem, "nextSibling" ); - }, - prevAll: function( elem ) { - return dir( elem, "previousSibling" ); - }, - nextUntil: function( elem, i, until ) { - return dir( elem, "nextSibling", until ); - }, - prevUntil: function( elem, i, until ) { - return dir( elem, "previousSibling", until ); - }, - siblings: function( elem ) { - return siblings( ( elem.parentNode || {} ).firstChild, elem ); - }, - children: function( elem ) { - return siblings( elem.firstChild ); - }, - contents: function( elem ) { - if ( typeof elem.contentDocument !== "undefined" ) { - return elem.contentDocument; - } - - // Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only - // Treat the template element as a regular one in browsers that - // don't support it. - if ( nodeName( elem, "template" ) ) { - elem = elem.content || elem; - } - - return jQuery.merge( [], elem.childNodes ); - } -}, function( name, fn ) { - jQuery.fn[ name ] = function( until, selector ) { - var matched = jQuery.map( this, fn, until ); - - if ( name.slice( -5 ) !== "Until" ) { - selector = until; - } - - if ( selector && typeof selector === "string" ) { - matched = jQuery.filter( selector, matched ); - } - - if ( this.length > 1 ) { - - // Remove duplicates - if ( !guaranteedUnique[ name ] ) { - jQuery.uniqueSort( matched ); - } - - // Reverse order for parents* and prev-derivatives - if ( rparentsprev.test( name ) ) { - matched.reverse(); - } - } - - return this.pushStack( matched ); - }; -} ); -var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g ); - - - -// Convert String-formatted options into Object-formatted ones -function createOptions( options ) { - var object = {}; - jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) { - object[ flag ] = true; - } ); - return object; -} - -/* - * Create a callback list using the following parameters: - * - * options: an optional list of space-separated options that will change how - * the callback list behaves or a more traditional option object - * - * By default a callback list will act like an event callback list and can be - * "fired" multiple times. - * - * Possible options: - * - * once: will ensure the callback list can only be fired once (like a Deferred) - * - * memory: will keep track of previous values and will call any callback added - * after the list has been fired right away with the latest "memorized" - * values (like a Deferred) - * - * unique: will ensure a callback can only be added once (no duplicate in the list) - * - * stopOnFalse: interrupt callings when a callback returns false - * - */ -jQuery.Callbacks = function( options ) { - - // Convert options from String-formatted to Object-formatted if needed - // (we check in cache first) - options = typeof options === "string" ? 
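// A small usage sketch of the option flags documented above:
var cb = jQuery.Callbacks( "once memory" );
cb.fire( "hello" );               // fires; "once" then locks the list
cb.add( function( v ) {
    console.log( v );             // "memory" replays the last value: "hello"
} );
cb.fire( "again" );               // ignored -- the list is already locked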
- createOptions( options ) : - jQuery.extend( {}, options ); - - var // Flag to know if list is currently firing - firing, - - // Last fire value for non-forgettable lists - memory, - - // Flag to know if list was already fired - fired, - - // Flag to prevent firing - locked, - - // Actual callback list - list = [], - - // Queue of execution data for repeatable lists - queue = [], - - // Index of currently firing callback (modified by add/remove as needed) - firingIndex = -1, - - // Fire callbacks - fire = function() { - - // Enforce single-firing - locked = locked || options.once; - - // Execute callbacks for all pending executions, - // respecting firingIndex overrides and runtime changes - fired = firing = true; - for ( ; queue.length; firingIndex = -1 ) { - memory = queue.shift(); - while ( ++firingIndex < list.length ) { - - // Run callback and check for early termination - if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false && - options.stopOnFalse ) { - - // Jump to end and forget the data so .add doesn't re-fire - firingIndex = list.length; - memory = false; - } - } - } - - // Forget the data if we're done with it - if ( !options.memory ) { - memory = false; - } - - firing = false; - - // Clean up if we're done firing for good - if ( locked ) { - - // Keep an empty list if we have data for future add calls - if ( memory ) { - list = []; - - // Otherwise, this object is spent - } else { - list = ""; - } - } - }, - - // Actual Callbacks object - self = { - - // Add a callback or a collection of callbacks to the list - add: function() { - if ( list ) { - - // If we have memory from a past run, we should fire after adding - if ( memory && !firing ) { - firingIndex = list.length - 1; - queue.push( memory ); - } - - ( function add( args ) { - jQuery.each( args, function( _, arg ) { - if ( isFunction( arg ) ) { - if ( !options.unique || !self.has( arg ) ) { - list.push( arg ); - } - } else if ( arg && arg.length && toType( arg ) !== "string" ) { - - // Inspect recursively - add( arg ); - } - } ); - } )( arguments ); - - if ( memory && !firing ) { - fire(); - } - } - return this; - }, - - // Remove a callback from the list - remove: function() { - jQuery.each( arguments, function( _, arg ) { - var index; - while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { - list.splice( index, 1 ); - - // Handle firing indexes - if ( index <= firingIndex ) { - firingIndex--; - } - } - } ); - return this; - }, - - // Check if a given callback is in the list. - // If no argument is given, return whether or not list has callbacks attached. - has: function( fn ) { - return fn ? - jQuery.inArray( fn, list ) > -1 : - list.length > 0; - }, - - // Remove all callbacks from the list - empty: function() { - if ( list ) { - list = []; - } - return this; - }, - - // Disable .fire and .add - // Abort any current/pending executions - // Clear all callbacks and values - disable: function() { - locked = queue = []; - list = memory = ""; - return this; - }, - disabled: function() { - return !list; - }, - - // Disable .fire - // Also disable .add unless we have memory (since it would have no effect) - // Abort any pending executions - lock: function() { - locked = queue = []; - if ( !memory && !firing ) { - list = memory = ""; - } - return this; - }, - locked: function() { - return !!locked; - }, - - // Call all callbacks with the given context and arguments - fireWith: function( context, args ) { - if ( !locked ) { - args = args || []; - args = [ context, args.slice ? 
args.slice() : args ]; - queue.push( args ); - if ( !firing ) { - fire(); - } - } - return this; - }, - - // Call all the callbacks with the given arguments - fire: function() { - self.fireWith( this, arguments ); - return this; - }, - - // To know if the callbacks have already been called at least once - fired: function() { - return !!fired; - } - }; - - return self; -}; - - -function Identity( v ) { - return v; -} -function Thrower( ex ) { - throw ex; -} - -function adoptValue( value, resolve, reject, noValue ) { - var method; - - try { - - // Check for promise aspect first to privilege synchronous behavior - if ( value && isFunction( ( method = value.promise ) ) ) { - method.call( value ).done( resolve ).fail( reject ); - - // Other thenables - } else if ( value && isFunction( ( method = value.then ) ) ) { - method.call( value, resolve, reject ); - - // Other non-thenables - } else { - - // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer: - // * false: [ value ].slice( 0 ) => resolve( value ) - // * true: [ value ].slice( 1 ) => resolve() - resolve.apply( undefined, [ value ].slice( noValue ) ); - } - - // For Promises/A+, convert exceptions into rejections - // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in - // Deferred#then to conditionally suppress rejection. - } catch ( value ) { - - // Support: Android 4.0 only - // Strict mode functions invoked without .call/.apply get global-object context - reject.apply( undefined, [ value ] ); - } -} - -jQuery.extend( { - - Deferred: function( func ) { - var tuples = [ - - // action, add listener, callbacks, - // ... .then handlers, argument index, [final state] - [ "notify", "progress", jQuery.Callbacks( "memory" ), - jQuery.Callbacks( "memory" ), 2 ], - [ "resolve", "done", jQuery.Callbacks( "once memory" ), - jQuery.Callbacks( "once memory" ), 0, "resolved" ], - [ "reject", "fail", jQuery.Callbacks( "once memory" ), - jQuery.Callbacks( "once memory" ), 1, "rejected" ] - ], - state = "pending", - promise = { - state: function() { - return state; - }, - always: function() { - deferred.done( arguments ).fail( arguments ); - return this; - }, - "catch": function( fn ) { - return promise.then( null, fn ); - }, - - // Keep pipe for back-compat - pipe: function( /* fnDone, fnFail, fnProgress */ ) { - var fns = arguments; - - return jQuery.Deferred( function( newDefer ) { - jQuery.each( tuples, function( i, tuple ) { - - // Map tuples (progress, done, fail) to arguments (done, fail, progress) - var fn = isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; - - // deferred.progress(function() { bind to newDefer or newDefer.notify }) - // deferred.done(function() { bind to newDefer or newDefer.resolve }) - // deferred.fail(function() { bind to newDefer or newDefer.reject }) - deferred[ tuple[ 1 ] ]( function() { - var returned = fn && fn.apply( this, arguments ); - if ( returned && isFunction( returned.promise ) ) { - returned.promise() - .progress( newDefer.notify ) - .done( newDefer.resolve ) - .fail( newDefer.reject ); - } else { - newDefer[ tuple[ 0 ] + "With" ]( - this, - fn ? 
[ returned ] : arguments - ); - } - } ); - } ); - fns = null; - } ).promise(); - }, - then: function( onFulfilled, onRejected, onProgress ) { - var maxDepth = 0; - function resolve( depth, deferred, handler, special ) { - return function() { - var that = this, - args = arguments, - mightThrow = function() { - var returned, then; - - // Support: Promises/A+ section 2.3.3.3.3 - // https://promisesaplus.com/#point-59 - // Ignore double-resolution attempts - if ( depth < maxDepth ) { - return; - } - - returned = handler.apply( that, args ); - - // Support: Promises/A+ section 2.3.1 - // https://promisesaplus.com/#point-48 - if ( returned === deferred.promise() ) { - throw new TypeError( "Thenable self-resolution" ); - } - - // Support: Promises/A+ sections 2.3.3.1, 3.5 - // https://promisesaplus.com/#point-54 - // https://promisesaplus.com/#point-75 - // Retrieve `then` only once - then = returned && - - // Support: Promises/A+ section 2.3.4 - // https://promisesaplus.com/#point-64 - // Only check objects and functions for thenability - ( typeof returned === "object" || - typeof returned === "function" ) && - returned.then; - - // Handle a returned thenable - if ( isFunction( then ) ) { - - // Special processors (notify) just wait for resolution - if ( special ) { - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ) - ); - - // Normal processors (resolve) also hook into progress - } else { - - // ...and disregard older resolution values - maxDepth++; - - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ), - resolve( maxDepth, deferred, Identity, - deferred.notifyWith ) - ); - } - - // Handle all other returned values - } else { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Identity ) { - that = undefined; - args = [ returned ]; - } - - // Process the value(s) - // Default process is resolve - ( special || deferred.resolveWith )( that, args ); - } - }, - - // Only normal processors (resolve) catch and reject exceptions - process = special ? - mightThrow : - function() { - try { - mightThrow(); - } catch ( e ) { - - if ( jQuery.Deferred.exceptionHook ) { - jQuery.Deferred.exceptionHook( e, - process.stackTrace ); - } - - // Support: Promises/A+ section 2.3.3.3.4.1 - // https://promisesaplus.com/#point-61 - // Ignore post-resolution exceptions - if ( depth + 1 >= maxDepth ) { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Thrower ) { - that = undefined; - args = [ e ]; - } - - deferred.rejectWith( that, args ); - } - } - }; - - // Support: Promises/A+ section 2.3.3.3.1 - // https://promisesaplus.com/#point-57 - // Re-resolve promises immediately to dodge false rejection from - // subsequent errors - if ( depth ) { - process(); - } else { - - // Call an optional hook to record the stack, in case of exception - // since it's otherwise lost when execution goes async - if ( jQuery.Deferred.getStackHook ) { - process.stackTrace = jQuery.Deferred.getStackHook(); - } - window.setTimeout( process ); - } - }; - } - - return jQuery.Deferred( function( newDefer ) { - - // progress_handlers.add( ... ) - tuples[ 0 ][ 3 ].add( - resolve( - 0, - newDefer, - isFunction( onProgress ) ? - onProgress : - Identity, - newDefer.notifyWith - ) - ); - - // fulfilled_handlers.add( ... 
) - tuples[ 1 ][ 3 ].add( - resolve( - 0, - newDefer, - isFunction( onFulfilled ) ? - onFulfilled : - Identity - ) - ); - - // rejected_handlers.add( ... ) - tuples[ 2 ][ 3 ].add( - resolve( - 0, - newDefer, - isFunction( onRejected ) ? - onRejected : - Thrower - ) - ); - } ).promise(); - }, - - // Get a promise for this deferred - // If obj is provided, the promise aspect is added to the object - promise: function( obj ) { - return obj != null ? jQuery.extend( obj, promise ) : promise; - } - }, - deferred = {}; - - // Add list-specific methods - jQuery.each( tuples, function( i, tuple ) { - var list = tuple[ 2 ], - stateString = tuple[ 5 ]; - - // promise.progress = list.add - // promise.done = list.add - // promise.fail = list.add - promise[ tuple[ 1 ] ] = list.add; - - // Handle state - if ( stateString ) { - list.add( - function() { - - // state = "resolved" (i.e., fulfilled) - // state = "rejected" - state = stateString; - }, - - // rejected_callbacks.disable - // fulfilled_callbacks.disable - tuples[ 3 - i ][ 2 ].disable, - - // rejected_handlers.disable - // fulfilled_handlers.disable - tuples[ 3 - i ][ 3 ].disable, - - // progress_callbacks.lock - tuples[ 0 ][ 2 ].lock, - - // progress_handlers.lock - tuples[ 0 ][ 3 ].lock - ); - } - - // progress_handlers.fire - // fulfilled_handlers.fire - // rejected_handlers.fire - list.add( tuple[ 3 ].fire ); - - // deferred.notify = function() { deferred.notifyWith(...) } - // deferred.resolve = function() { deferred.resolveWith(...) } - // deferred.reject = function() { deferred.rejectWith(...) } - deferred[ tuple[ 0 ] ] = function() { - deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); - return this; - }; - - // deferred.notifyWith = list.fireWith - // deferred.resolveWith = list.fireWith - // deferred.rejectWith = list.fireWith - deferred[ tuple[ 0 ] + "With" ] = list.fireWith; - } ); - - // Make the deferred a promise - promise.promise( deferred ); - - // Call given func if any - if ( func ) { - func.call( deferred, deferred ); - } - - // All done! - return deferred; - }, - - // Deferred helper - when: function( singleValue ) { - var - - // count of uncompleted subordinates - remaining = arguments.length, - - // count of unprocessed arguments - i = remaining, - - // subordinate fulfillment data - resolveContexts = Array( i ), - resolveValues = slice.call( arguments ), - - // the master Deferred - master = jQuery.Deferred(), - - // subordinate callback factory - updateFunc = function( i ) { - return function( value ) { - resolveContexts[ i ] = this; - resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; - if ( !( --remaining ) ) { - master.resolveWith( resolveContexts, resolveValues ); - } - }; - }; - - // Single- and empty arguments are adopted like Promise.resolve - if ( remaining <= 1 ) { - adoptValue( singleValue, master.done( updateFunc( i ) ).resolve, master.reject, - !remaining ); - - // Use .then() to unwrap secondary thenables (cf. gh-3000) - if ( master.state() === "pending" || - isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { - - return master.then(); - } - } - - // Multiple arguments are aggregated like Promise.all array elements - while ( i-- ) { - adoptValue( resolveValues[ i ], updateFunc( i ), master.reject ); - } - - return master.promise(); - } -} ); - - -// These usually indicate a programmer mistake during development, -// warn about them ASAP rather than swallowing them by default. 
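// The two when() modes implemented above, for illustration:
jQuery.when( 42 ).done( function( v ) {
    console.log( v );       // 42 -- a lone non-thenable is adopted as resolved
} );
jQuery.when( jQuery.Deferred().resolve( 1 ), 2 ).done( function( a, b ) {
    console.log( a + b );   // 3 -- multiple arguments aggregate like Promise.all
} );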
-var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; - -jQuery.Deferred.exceptionHook = function( error, stack ) { - - // Support: IE 8 - 9 only - // Console exists when dev tools are open, which can happen at any time - if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { - window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); - } -}; - - - - -jQuery.readyException = function( error ) { - window.setTimeout( function() { - throw error; - } ); -}; - - - - -// The deferred used on DOM ready -var readyList = jQuery.Deferred(); - -jQuery.fn.ready = function( fn ) { - - readyList - .then( fn ) - - // Wrap jQuery.readyException in a function so that the lookup - // happens at the time of error handling instead of callback - // registration. - .catch( function( error ) { - jQuery.readyException( error ); - } ); - - return this; -}; - -jQuery.extend( { - - // Is the DOM ready to be used? Set to true once it occurs. - isReady: false, - - // A counter to track how many items to wait for before - // the ready event fires. See #6781 - readyWait: 1, - - // Handle when the DOM is ready - ready: function( wait ) { - - // Abort if there are pending holds or we're already ready - if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { - return; - } - - // Remember that the DOM is ready - jQuery.isReady = true; - - // If a normal DOM Ready event fired, decrement, and wait if need be - if ( wait !== true && --jQuery.readyWait > 0 ) { - return; - } - - // If there are functions bound, to execute - readyList.resolveWith( document, [ jQuery ] ); - } -} ); - -jQuery.ready.then = readyList.then; - -// The ready event handler and self cleanup method -function completed() { - document.removeEventListener( "DOMContentLoaded", completed ); - window.removeEventListener( "load", completed ); - jQuery.ready(); -} - -// Catch cases where $(document).ready() is called -// after the browser event has already occurred. -// Support: IE <=9 - 10 only -// Older IE sometimes signals "interactive" too soon -if ( document.readyState === "complete" || - ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { - - // Handle it asynchronously to allow scripts the opportunity to delay ready - window.setTimeout( jQuery.ready ); - -} else { - - // Use the handy event callback - document.addEventListener( "DOMContentLoaded", completed ); - - // A fallback to window.onload, that will always work - window.addEventListener( "load", completed ); -} - - - - -// Multifunctional method to get and set values of a collection -// The value/s can optionally be executed if it's a function -var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { - var i = 0, - len = elems.length, - bulk = key == null; - - // Sets many values - if ( toType( key ) === "object" ) { - chainable = true; - for ( i in key ) { - access( elems, fn, i, key[ i ], true, emptyGet, raw ); - } - - // Sets one value - } else if ( value !== undefined ) { - chainable = true; - - if ( !isFunction( value ) ) { - raw = true; - } - - if ( bulk ) { - - // Bulk operations run against the entire set - if ( raw ) { - fn.call( elems, value ); - fn = null; - - // ...except when executing function values - } else { - bulk = fn; - fn = function( elem, key, value ) { - return bulk.call( jQuery( elem ), value ); - }; - } - } - - if ( fn ) { - for ( ; i < len; i++ ) { - fn( - elems[ i ], key, raw ? 
- value : - value.call( elems[ i ], i, fn( elems[ i ], key ) ) - ); - } - } - } - - if ( chainable ) { - return elems; - } - - // Gets - if ( bulk ) { - return fn.call( elems ); - } - - return len ? fn( elems[ 0 ], key ) : emptyGet; -}; - - -// Matches dashed string for camelizing -var rmsPrefix = /^-ms-/, - rdashAlpha = /-([a-z])/g; - -// Used by camelCase as callback to replace() -function fcamelCase( all, letter ) { - return letter.toUpperCase(); -} - -// Convert dashed to camelCase; used by the css and data modules -// Support: IE <=9 - 11, Edge 12 - 15 -// Microsoft forgot to hump their vendor prefix (#9572) -function camelCase( string ) { - return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); -} -var acceptData = function( owner ) { - - // Accepts only: - // - Node - // - Node.ELEMENT_NODE - // - Node.DOCUMENT_NODE - // - Object - // - Any - return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); -}; - - - - -function Data() { - this.expando = jQuery.expando + Data.uid++; -} - -Data.uid = 1; - -Data.prototype = { - - cache: function( owner ) { - - // Check if the owner object already has a cache - var value = owner[ this.expando ]; - - // If not, create one - if ( !value ) { - value = {}; - - // We can accept data for non-element nodes in modern browsers, - // but we should not, see #8335. - // Always return an empty object. - if ( acceptData( owner ) ) { - - // If it is a node unlikely to be stringify-ed or looped over - // use plain assignment - if ( owner.nodeType ) { - owner[ this.expando ] = value; - - // Otherwise secure it in a non-enumerable property - // configurable must be true to allow the property to be - // deleted when data is removed - } else { - Object.defineProperty( owner, this.expando, { - value: value, - configurable: true - } ); - } - } - } - - return value; - }, - set: function( owner, data, value ) { - var prop, - cache = this.cache( owner ); - - // Handle: [ owner, key, value ] args - // Always use camelCase key (gh-2257) - if ( typeof data === "string" ) { - cache[ camelCase( data ) ] = value; - - // Handle: [ owner, { properties } ] args - } else { - - // Copy the properties one-by-one to the cache object - for ( prop in data ) { - cache[ camelCase( prop ) ] = data[ prop ]; - } - } - return cache; - }, - get: function( owner, key ) { - return key === undefined ? - this.cache( owner ) : - - // Always use camelCase key (gh-2257) - owner[ this.expando ] && owner[ this.expando ][ camelCase( key ) ]; - }, - access: function( owner, key, value ) { - - // In cases where either: - // - // 1. No key was specified - // 2. A string key was specified, but no value provided - // - // Take the "read" path and allow the get method to determine - // which value to return, respectively either: - // - // 1. The entire cache object - // 2. The data stored at the key - // - if ( key === undefined || - ( ( key && typeof key === "string" ) && value === undefined ) ) { - - return this.get( owner, key ); - } - - // When the key is not a string, or both a key and value - // are specified, set or extend (existing objects) with either: - // - // 1. An object of properties - // 2. A key and value - // - this.set( owner, key, value ); - - // Since the "set" path can have two possible entry points - // return the expected data based on which path was taken[*] - return value !== undefined ? 
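// camelCase() is module-internal, so it is restated standalone here purely for
// illustration; note the deliberately lowercase "ms" produced by rmsPrefix:
var rmsPrefix = /^-ms-/, rdashAlpha = /-([a-z])/g;
function camelCase( s ) {
    return s.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, function( _, c ) {
        return c.toUpperCase();
    } );
}
console.log( camelCase( "background-color" ) ); // "backgroundColor"
console.log( camelCase( "-ms-transform" ) );    // "msTransform"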
value : key; - }, - remove: function( owner, key ) { - var i, - cache = owner[ this.expando ]; - - if ( cache === undefined ) { - return; - } - - if ( key !== undefined ) { - - // Support array or space separated string of keys - if ( Array.isArray( key ) ) { - - // If key is an array of keys... - // We always set camelCase keys, so remove that. - key = key.map( camelCase ); - } else { - key = camelCase( key ); - - // If a key with the spaces exists, use it. - // Otherwise, create an array by matching non-whitespace - key = key in cache ? - [ key ] : - ( key.match( rnothtmlwhite ) || [] ); - } - - i = key.length; - - while ( i-- ) { - delete cache[ key[ i ] ]; - } - } - - // Remove the expando if there's no more data - if ( key === undefined || jQuery.isEmptyObject( cache ) ) { - - // Support: Chrome <=35 - 45 - // Webkit & Blink performance suffers when deleting properties - // from DOM nodes, so set to undefined instead - // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) - if ( owner.nodeType ) { - owner[ this.expando ] = undefined; - } else { - delete owner[ this.expando ]; - } - } - }, - hasData: function( owner ) { - var cache = owner[ this.expando ]; - return cache !== undefined && !jQuery.isEmptyObject( cache ); - } -}; -var dataPriv = new Data(); - -var dataUser = new Data(); - - - -// Implementation Summary -// -// 1. Enforce API surface and semantic compatibility with 1.9.x branch -// 2. Improve the module's maintainability by reducing the storage -// paths to a single mechanism. -// 3. Use the same single mechanism to support "private" and "user" data. -// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) -// 5. Avoid exposing implementation details on user objects (eg. expando properties) -// 6. Provide a clear path for implementation upgrade to WeakMap in 2014 - -var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, - rmultiDash = /[A-Z]/g; - -function getData( data ) { - if ( data === "true" ) { - return true; - } - - if ( data === "false" ) { - return false; - } - - if ( data === "null" ) { - return null; - } - - // Only convert to a number if it doesn't change the string - if ( data === +data + "" ) { - return +data; - } - - if ( rbrace.test( data ) ) { - return JSON.parse( data ); - } - - return data; -} - -function dataAttr( elem, key, data ) { - var name; - - // If nothing was found internally, try to fetch any - // data from the HTML5 data-* attribute - if ( data === undefined && elem.nodeType === 1 ) { - name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); - data = elem.getAttribute( name ); - - if ( typeof data === "string" ) { - try { - data = getData( data ); - } catch ( e ) {} - - // Make sure we set the data so it isn't changed later - dataUser.set( elem, key, data ); - } else { - data = undefined; - } - } - return data; -} - -jQuery.extend( { - hasData: function( elem ) { - return dataUser.hasData( elem ) || dataPriv.hasData( elem ); - }, - - data: function( elem, name, data ) { - return dataUser.access( elem, name, data ); - }, - - removeData: function( elem, name ) { - dataUser.remove( elem, name ); - }, - - // TODO: Now that all calls to _data and _removeData have been replaced - // with direct calls to dataPriv methods, these can be deprecated. 
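// How getData()'s coercion rules surface through the public API, assuming an
// element <div id="d" data-count="42" data-flag="true" data-cfg='{"a":1}'>:
var d = jQuery( "#d" );
typeof d.data( "count" );   // "number"  -- "42" === +"42" + ""
typeof d.data( "flag" );    // "boolean" -- literal "true"
d.data( "cfg" ).a;          // 1         -- rbrace matched, so JSON.parse ran
d.attr( "data-count" );     // "42"      -- the attribute itself stays a string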
- _data: function( elem, name, data ) { - return dataPriv.access( elem, name, data ); - }, - - _removeData: function( elem, name ) { - dataPriv.remove( elem, name ); - } -} ); - -jQuery.fn.extend( { - data: function( key, value ) { - var i, name, data, - elem = this[ 0 ], - attrs = elem && elem.attributes; - - // Gets all values - if ( key === undefined ) { - if ( this.length ) { - data = dataUser.get( elem ); - - if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { - i = attrs.length; - while ( i-- ) { - - // Support: IE 11 only - // The attrs elements can be null (#14894) - if ( attrs[ i ] ) { - name = attrs[ i ].name; - if ( name.indexOf( "data-" ) === 0 ) { - name = camelCase( name.slice( 5 ) ); - dataAttr( elem, name, data[ name ] ); - } - } - } - dataPriv.set( elem, "hasDataAttrs", true ); - } - } - - return data; - } - - // Sets multiple values - if ( typeof key === "object" ) { - return this.each( function() { - dataUser.set( this, key ); - } ); - } - - return access( this, function( value ) { - var data; - - // The calling jQuery object (element matches) is not empty - // (and therefore has an element appears at this[ 0 ]) and the - // `value` parameter was not undefined. An empty jQuery object - // will result in `undefined` for elem = this[ 0 ] which will - // throw an exception if an attempt to read a data cache is made. - if ( elem && value === undefined ) { - - // Attempt to get data from the cache - // The key will always be camelCased in Data - data = dataUser.get( elem, key ); - if ( data !== undefined ) { - return data; - } - - // Attempt to "discover" the data in - // HTML5 custom data-* attrs - data = dataAttr( elem, key ); - if ( data !== undefined ) { - return data; - } - - // We tried really hard, but the data doesn't exist. - return; - } - - // Set the data... 
- this.each( function() { - - // We always store the camelCased key - dataUser.set( this, key, value ); - } ); - }, null, value, arguments.length > 1, null, true ); - }, - - removeData: function( key ) { - return this.each( function() { - dataUser.remove( this, key ); - } ); - } -} ); - - -jQuery.extend( { - queue: function( elem, type, data ) { - var queue; - - if ( elem ) { - type = ( type || "fx" ) + "queue"; - queue = dataPriv.get( elem, type ); - - // Speed up dequeue by getting out quickly if this is just a lookup - if ( data ) { - if ( !queue || Array.isArray( data ) ) { - queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); - } else { - queue.push( data ); - } - } - return queue || []; - } - }, - - dequeue: function( elem, type ) { - type = type || "fx"; - - var queue = jQuery.queue( elem, type ), - startLength = queue.length, - fn = queue.shift(), - hooks = jQuery._queueHooks( elem, type ), - next = function() { - jQuery.dequeue( elem, type ); - }; - - // If the fx queue is dequeued, always remove the progress sentinel - if ( fn === "inprogress" ) { - fn = queue.shift(); - startLength--; - } - - if ( fn ) { - - // Add a progress sentinel to prevent the fx queue from being - // automatically dequeued - if ( type === "fx" ) { - queue.unshift( "inprogress" ); - } - - // Clear up the last queue stop function - delete hooks.stop; - fn.call( elem, next, hooks ); - } - - if ( !startLength && hooks ) { - hooks.empty.fire(); - } - }, - - // Not public - generate a queueHooks object, or return the current one - _queueHooks: function( elem, type ) { - var key = type + "queueHooks"; - return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { - empty: jQuery.Callbacks( "once memory" ).add( function() { - dataPriv.remove( elem, [ type + "queue", key ] ); - } ) - } ); - } -} ); - -jQuery.fn.extend( { - queue: function( type, data ) { - var setter = 2; - - if ( typeof type !== "string" ) { - data = type; - type = "fx"; - setter--; - } - - if ( arguments.length < setter ) { - return jQuery.queue( this[ 0 ], type ); - } - - return data === undefined ? 
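// A sketch of the fx-queue plumbing above; "#box" is an assumed element.
// Each queued function receives `next` and must call it to release the
// "inprogress" sentinel for the following entry:
jQuery( "#box" )
    .queue( "fx", function( next ) {
        console.log( "step 1" );
        next();
    } )
    .queue( "fx", function( next ) {
        console.log( "step 2" );
        next();
    } ); // the first .queue() call on an idle "fx" queue dequeues automatically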
- this : - this.each( function() { - var queue = jQuery.queue( this, type, data ); - - // Ensure a hooks for this queue - jQuery._queueHooks( this, type ); - - if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { - jQuery.dequeue( this, type ); - } - } ); - }, - dequeue: function( type ) { - return this.each( function() { - jQuery.dequeue( this, type ); - } ); - }, - clearQueue: function( type ) { - return this.queue( type || "fx", [] ); - }, - - // Get a promise resolved when queues of a certain type - // are emptied (fx is the type by default) - promise: function( type, obj ) { - var tmp, - count = 1, - defer = jQuery.Deferred(), - elements = this, - i = this.length, - resolve = function() { - if ( !( --count ) ) { - defer.resolveWith( elements, [ elements ] ); - } - }; - - if ( typeof type !== "string" ) { - obj = type; - type = undefined; - } - type = type || "fx"; - - while ( i-- ) { - tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); - if ( tmp && tmp.empty ) { - count++; - tmp.empty.add( resolve ); - } - } - resolve(); - return defer.promise( obj ); - } -} ); -var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; - -var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); - - -var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; - -var documentElement = document.documentElement; - - - - var isAttached = function( elem ) { - return jQuery.contains( elem.ownerDocument, elem ); - }, - composed = { composed: true }; - - // Support: IE 9 - 11+, Edge 12 - 18+, iOS 10.0 - 10.2 only - // Check attachment across shadow DOM boundaries when possible (gh-3504) - // Support: iOS 10.0-10.2 only - // Early iOS 10 versions support `attachShadow` but not `getRootNode`, - // leading to errors. We need to check for `getRootNode`. - if ( documentElement.getRootNode ) { - isAttached = function( elem ) { - return jQuery.contains( elem.ownerDocument, elem ) || - elem.getRootNode( composed ) === elem.ownerDocument; - }; - } -var isHiddenWithinTree = function( elem, el ) { - - // isHiddenWithinTree might be called from jQuery#filter function; - // in that case, element will be second argument - elem = el || elem; - - // Inline style trumps all - return elem.style.display === "none" || - elem.style.display === "" && - - // Otherwise, check computed style - // Support: Firefox <=43 - 45 - // Disconnected elements can have computed display: none, so first confirm that elem is - // in the document. - isAttached( elem ) && - - jQuery.css( elem, "display" ) === "none"; - }; - -var swap = function( elem, options, callback, args ) { - var ret, name, - old = {}; - - // Remember the old values, and insert the new ones - for ( name in options ) { - old[ name ] = elem.style[ name ]; - elem.style[ name ] = options[ name ]; - } - - ret = callback.apply( elem, args || [] ); - - // Revert the old values - for ( name in options ) { - elem.style[ name ] = old[ name ]; - } - - return ret; -}; - - - - -function adjustCSS( elem, prop, valueParts, tween ) { - var adjusted, scale, - maxIterations = 20, - currentValue = tween ? - function() { - return tween.cur(); - } : - function() { - return jQuery.css( elem, prop, "" ); - }, - initial = currentValue(), - unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? 
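// What rcssNum (defined above) captures, for illustration -- the sign, number,
// and unit that adjustCSS() needs in order to apply relative "+="/"-=" values:
console.log( rcssNum.exec( "+=1.5em" ) );
// [ "+=1.5em", "+", "1.5", "em" ]
console.log( rcssNum.exec( "40%" ) );
// [ "40%", undefined, "40", "%" ]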
"" : "px" ), - - // Starting value computation is required for potential unit mismatches - initialInUnit = elem.nodeType && - ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && - rcssNum.exec( jQuery.css( elem, prop ) ); - - if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { - - // Support: Firefox <=54 - // Halve the iteration target value to prevent interference from CSS upper bounds (gh-2144) - initial = initial / 2; - - // Trust units reported by jQuery.css - unit = unit || initialInUnit[ 3 ]; - - // Iteratively approximate from a nonzero starting point - initialInUnit = +initial || 1; - - while ( maxIterations-- ) { - - // Evaluate and update our best guess (doubling guesses that zero out). - // Finish if the scale equals or crosses 1 (making the old*new product non-positive). - jQuery.style( elem, prop, initialInUnit + unit ); - if ( ( 1 - scale ) * ( 1 - ( scale = currentValue() / initial || 0.5 ) ) <= 0 ) { - maxIterations = 0; - } - initialInUnit = initialInUnit / scale; - - } - - initialInUnit = initialInUnit * 2; - jQuery.style( elem, prop, initialInUnit + unit ); - - // Make sure we update the tween properties later on - valueParts = valueParts || []; - } - - if ( valueParts ) { - initialInUnit = +initialInUnit || +initial || 0; - - // Apply relative offset (+=/-=) if specified - adjusted = valueParts[ 1 ] ? - initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] : - +valueParts[ 2 ]; - if ( tween ) { - tween.unit = unit; - tween.start = initialInUnit; - tween.end = adjusted; - } - } - return adjusted; -} - - -var defaultDisplayMap = {}; - -function getDefaultDisplay( elem ) { - var temp, - doc = elem.ownerDocument, - nodeName = elem.nodeName, - display = defaultDisplayMap[ nodeName ]; - - if ( display ) { - return display; - } - - temp = doc.body.appendChild( doc.createElement( nodeName ) ); - display = jQuery.css( temp, "display" ); - - temp.parentNode.removeChild( temp ); - - if ( display === "none" ) { - display = "block"; - } - defaultDisplayMap[ nodeName ] = display; - - return display; -} - -function showHide( elements, show ) { - var display, elem, - values = [], - index = 0, - length = elements.length; - - // Determine new display value for elements that need to change - for ( ; index < length; index++ ) { - elem = elements[ index ]; - if ( !elem.style ) { - continue; - } - - display = elem.style.display; - if ( show ) { - - // Since we force visibility upon cascade-hidden elements, an immediate (and slow) - // check is required in this first loop unless we have a nonempty display value (either - // inline or about-to-be-restored) - if ( display === "none" ) { - values[ index ] = dataPriv.get( elem, "display" ) || null; - if ( !values[ index ] ) { - elem.style.display = ""; - } - } - if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) { - values[ index ] = getDefaultDisplay( elem ); - } - } else { - if ( display !== "none" ) { - values[ index ] = "none"; - - // Remember what we're overwriting - dataPriv.set( elem, "display", display ); - } - } - } - - // Set the display of the elements in a second loop to avoid constant reflow - for ( index = 0; index < length; index++ ) { - if ( values[ index ] != null ) { - elements[ index ].style.display = values[ index ]; - } - } - - return elements; -} - -jQuery.fn.extend( { - show: function() { - return showHide( this, true ); - }, - hide: function() { - return showHide( this ); - }, - toggle: function( state ) { - if ( typeof state === "boolean" ) { - return state ? 
this.show() : this.hide(); - } - - return this.each( function() { - if ( isHiddenWithinTree( this ) ) { - jQuery( this ).show(); - } else { - jQuery( this ).hide(); - } - } ); - } -} ); -var rcheckableType = ( /^(?:checkbox|radio)$/i ); - -var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]*)/i ); - -var rscriptType = ( /^$|^module$|\/(?:java|ecma)script/i ); - - - -// We have to close these tags to support XHTML (#13200) -var wrapMap = { - - // Support: IE <=9 only - option: [ 1, "<select multiple='multiple'>", "</select>" ], - - // XHTML parsers do not magically insert elements in the - // same way that tag soup parsers do. So we cannot shorten - // this by omitting <tbody> or other required elements. - thead: [ 1, "<table>", "</table>" ], - col: [ 2, "<table><colgroup>", "</colgroup></table>" ], - tr: [ 2, "<table><tbody>", "</tbody></table>" ], - td: [ 3, "<table><tbody><tr>", "</tr></tbody></table>
" ], - - _default: [ 0, "", "" ] -}; - -// Support: IE <=9 only -wrapMap.optgroup = wrapMap.option; - -wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; -wrapMap.th = wrapMap.td; - - -function getAll( context, tag ) { - - // Support: IE <=9 - 11 only - // Use typeof to avoid zero-argument method invocation on host objects (#15151) - var ret; - - if ( typeof context.getElementsByTagName !== "undefined" ) { - ret = context.getElementsByTagName( tag || "*" ); - - } else if ( typeof context.querySelectorAll !== "undefined" ) { - ret = context.querySelectorAll( tag || "*" ); - - } else { - ret = []; - } - - if ( tag === undefined || tag && nodeName( context, tag ) ) { - return jQuery.merge( [ context ], ret ); - } - - return ret; -} - - -// Mark scripts as having already been evaluated -function setGlobalEval( elems, refElements ) { - var i = 0, - l = elems.length; - - for ( ; i < l; i++ ) { - dataPriv.set( - elems[ i ], - "globalEval", - !refElements || dataPriv.get( refElements[ i ], "globalEval" ) - ); - } -} - - -var rhtml = /<|&#?\w+;/; - -function buildFragment( elems, context, scripts, selection, ignored ) { - var elem, tmp, tag, wrap, attached, j, - fragment = context.createDocumentFragment(), - nodes = [], - i = 0, - l = elems.length; - - for ( ; i < l; i++ ) { - elem = elems[ i ]; - - if ( elem || elem === 0 ) { - - // Add nodes directly - if ( toType( elem ) === "object" ) { - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem ); - - // Convert non-html into a text node - } else if ( !rhtml.test( elem ) ) { - nodes.push( context.createTextNode( elem ) ); - - // Convert html into DOM nodes - } else { - tmp = tmp || fragment.appendChild( context.createElement( "div" ) ); - - // Deserialize a standard representation - tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase(); - wrap = wrapMap[ tag ] || wrapMap._default; - tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ]; - - // Descend through wrappers to the right content - j = wrap[ 0 ]; - while ( j-- ) { - tmp = tmp.lastChild; - } - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( nodes, tmp.childNodes ); - - // Remember the top-level container - tmp = fragment.firstChild; - - // Ensure the created nodes are orphaned (#12392) - tmp.textContent = ""; - } - } - } - - // Remove wrapper from fragment - fragment.textContent = ""; - - i = 0; - while ( ( elem = nodes[ i++ ] ) ) { - - // Skip elements already in the context collection (trac-4087) - if ( selection && jQuery.inArray( elem, selection ) > -1 ) { - if ( ignored ) { - ignored.push( elem ); - } - continue; - } - - attached = isAttached( elem ); - - // Append to fragment - tmp = getAll( fragment.appendChild( elem ), "script" ); - - // Preserve script evaluation history - if ( attached ) { - setGlobalEval( tmp ); - } - - // Capture executables - if ( scripts ) { - j = 0; - while ( ( elem = tmp[ j++ ] ) ) { - if ( rscriptType.test( elem.type || "" ) ) { - scripts.push( elem ); - } - } - } - } - - return fragment; -} - - -( function() { - var fragment = document.createDocumentFragment(), - div = fragment.appendChild( document.createElement( "div" ) ), - input = document.createElement( "input" ); - - // Support: Android 4.0 - 4.3 only - // Check state lost if the name is set (#11217) - // Support: Windows Web Apps (WWA) - // `name` and `type` must 
use .setAttribute for WWA (#14901) - input.setAttribute( "type", "radio" ); - input.setAttribute( "checked", "checked" ); - input.setAttribute( "name", "t" ); - - div.appendChild( input ); - - // Support: Android <=4.1 only - // Older WebKit doesn't clone checked state correctly in fragments - support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked; - - // Support: IE <=11 only - // Make sure textarea (and checkbox) defaultValue is properly cloned - div.innerHTML = "<textarea>x</textarea>"; - support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue; -} )(); - - -var - rkeyEvent = /^key/, - rmouseEvent = /^(?:mouse|pointer|contextmenu|drag|drop)|click/, - rtypenamespace = /^([^.]*)(?:\.(.+)|)/; - -function returnTrue() { - return true; -} - -function returnFalse() { - return false; -} - -// Support: IE <=9 - 11+ -// focus() and blur() are asynchronous, except when they are no-op. -// So expect focus to be synchronous when the element is already active, -// and blur to be synchronous when the element is not already active. -// (focus and blur are always synchronous in other supported browsers, -// this just defines when we can count on it). -function expectSync( elem, type ) { - return ( elem === safeActiveElement() ) === ( type === "focus" ); -} - -// Support: IE <=9 only -// Accessing document.activeElement can throw unexpectedly -// https://bugs.jquery.com/ticket/13393 -function safeActiveElement() { - try { - return document.activeElement; - } catch ( err ) { } -} - -function on( elem, types, selector, data, fn, one ) { - var origFn, type; - - // Types can be a map of types/handlers - if ( typeof types === "object" ) { - - // ( types-Object, selector, data ) - if ( typeof selector !== "string" ) { - - // ( types-Object, data ) - data = data || selector; - selector = undefined; - } - for ( type in types ) { - on( elem, type, selector, data, types[ type ], one ); - } - return elem; - } - - if ( data == null && fn == null ) { - - // ( types, fn ) - fn = selector; - data = selector = undefined; - } else if ( fn == null ) { - if ( typeof selector === "string" ) { - - // ( types, selector, fn ) - fn = data; - data = undefined; - } else { - - // ( types, data, fn ) - fn = data; - data = selector; - selector = undefined; - } - } - if ( fn === false ) { - fn = returnFalse; - } else if ( !fn ) { - return elem; - } - - if ( one === 1 ) { - origFn = fn; - fn = function( event ) { - - // Can use an empty set, since event contains the info - jQuery().off( event ); - return origFn.apply( this, arguments ); - }; - - // Use same guid so caller can remove using origFn - fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); - } - return elem.each( function() { - jQuery.event.add( this, types, fn, data, selector ); - } ); -} - -/* - * Helper functions for managing events -- not part of the public interface. - * Props to Dean Edwards' addEvent library for many of the ideas.
- */ -jQuery.event = { - - global: {}, - - add: function( elem, types, handler, data, selector ) { - - var handleObjIn, eventHandle, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.get( elem ); - - // Don't attach events to noData or text/comment nodes (but allow plain objects) - if ( !elemData ) { - return; - } - - // Caller can pass in an object of custom data in lieu of the handler - if ( handler.handler ) { - handleObjIn = handler; - handler = handleObjIn.handler; - selector = handleObjIn.selector; - } - - // Ensure that invalid selectors throw exceptions at attach time - // Evaluate against documentElement in case elem is a non-element node (e.g., document) - if ( selector ) { - jQuery.find.matchesSelector( documentElement, selector ); - } - - // Make sure that the handler has a unique ID, used to find/remove it later - if ( !handler.guid ) { - handler.guid = jQuery.guid++; - } - - // Init the element's event structure and main handler, if this is the first - if ( !( events = elemData.events ) ) { - events = elemData.events = {}; - } - if ( !( eventHandle = elemData.handle ) ) { - eventHandle = elemData.handle = function( e ) { - - // Discard the second event of a jQuery.event.trigger() and - // when an event is called after a page has unloaded - return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? - jQuery.event.dispatch.apply( elem, arguments ) : undefined; - }; - } - - // Handle multiple events separated by a space - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // There *must* be a type, no attaching namespace-only handlers - if ( !type ) { - continue; - } - - // If event changes its type, use the special event handlers for the changed type - special = jQuery.event.special[ type ] || {}; - - // If selector defined, determine special event api type, otherwise given type - type = ( selector ? special.delegateType : special.bindType ) || type; - - // Update special based on newly reset type - special = jQuery.event.special[ type ] || {}; - - // handleObj is passed to all event handlers - handleObj = jQuery.extend( { - type: type, - origType: origType, - data: data, - handler: handler, - guid: handler.guid, - selector: selector, - needsContext: selector && jQuery.expr.match.needsContext.test( selector ), - namespace: namespaces.join( "." 
) - }, handleObjIn ); - - // Init the event handler queue if we're the first - if ( !( handlers = events[ type ] ) ) { - handlers = events[ type ] = []; - handlers.delegateCount = 0; - - // Only use addEventListener if the special events handler returns false - if ( !special.setup || - special.setup.call( elem, data, namespaces, eventHandle ) === false ) { - - if ( elem.addEventListener ) { - elem.addEventListener( type, eventHandle ); - } - } - } - - if ( special.add ) { - special.add.call( elem, handleObj ); - - if ( !handleObj.handler.guid ) { - handleObj.handler.guid = handler.guid; - } - } - - // Add to the element's handler list, delegates in front - if ( selector ) { - handlers.splice( handlers.delegateCount++, 0, handleObj ); - } else { - handlers.push( handleObj ); - } - - // Keep track of which events have ever been used, for event optimization - jQuery.event.global[ type ] = true; - } - - }, - - // Detach an event or set of events from an element - remove: function( elem, types, handler, selector, mappedTypes ) { - - var j, origCount, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.hasData( elem ) && dataPriv.get( elem ); - - if ( !elemData || !( events = elemData.events ) ) { - return; - } - - // Once for each type.namespace in types; type may be omitted - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // Unbind all events (on this namespace, if provided) for the element - if ( !type ) { - for ( type in events ) { - jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); - } - continue; - } - - special = jQuery.event.special[ type ] || {}; - type = ( selector ? 
special.delegateType : special.bindType ) || type; - handlers = events[ type ] || []; - tmp = tmp[ 2 ] && - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); - - // Remove matching events - origCount = j = handlers.length; - while ( j-- ) { - handleObj = handlers[ j ]; - - if ( ( mappedTypes || origType === handleObj.origType ) && - ( !handler || handler.guid === handleObj.guid ) && - ( !tmp || tmp.test( handleObj.namespace ) ) && - ( !selector || selector === handleObj.selector || - selector === "**" && handleObj.selector ) ) { - handlers.splice( j, 1 ); - - if ( handleObj.selector ) { - handlers.delegateCount--; - } - if ( special.remove ) { - special.remove.call( elem, handleObj ); - } - } - } - - // Remove generic event handler if we removed something and no more handlers exist - // (avoids potential for endless recursion during removal of special event handlers) - if ( origCount && !handlers.length ) { - if ( !special.teardown || - special.teardown.call( elem, namespaces, elemData.handle ) === false ) { - - jQuery.removeEvent( elem, type, elemData.handle ); - } - - delete events[ type ]; - } - } - - // Remove data and the expando if it's no longer used - if ( jQuery.isEmptyObject( events ) ) { - dataPriv.remove( elem, "handle events" ); - } - }, - - dispatch: function( nativeEvent ) { - - // Make a writable jQuery.Event from the native event object - var event = jQuery.event.fix( nativeEvent ); - - var i, j, ret, matched, handleObj, handlerQueue, - args = new Array( arguments.length ), - handlers = ( dataPriv.get( this, "events" ) || {} )[ event.type ] || [], - special = jQuery.event.special[ event.type ] || {}; - - // Use the fix-ed jQuery.Event rather than the (read-only) native event - args[ 0 ] = event; - - for ( i = 1; i < arguments.length; i++ ) { - args[ i ] = arguments[ i ]; - } - - event.delegateTarget = this; - - // Call the preDispatch hook for the mapped type, and let it bail if desired - if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { - return; - } - - // Determine handlers - handlerQueue = jQuery.event.handlers.call( this, event, handlers ); - - // Run delegates first; they may want to stop propagation beneath us - i = 0; - while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { - event.currentTarget = matched.elem; - - j = 0; - while ( ( handleObj = matched.handlers[ j++ ] ) && - !event.isImmediatePropagationStopped() ) { - - // If the event is namespaced, then each handler is only invoked if it is - // specially universal or its namespaces are a superset of the event's. 
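// Illustrative aside (editorial, not part of the jQuery source): how the
// namespace machinery above behaves in practice; "#btn" and the handler
// bodies are hypothetical. rtypenamespace splits "click.save" into the type
// "click" plus the namespace "save", and a handler is then invoked only when
// its namespaces are a superset of the triggered event's.
$( "#btn" ).on( "click.save", function() { console.log( "save handler" ); } );
$( "#btn" ).on( "click.ui", function() { console.log( "ui handler" ); } );

$( "#btn" ).trigger( "click" );      // no namespace filter: both handlers run
$( "#btn" ).trigger( "click.save" ); // only the "save"-namespaced handler runs
$( "#btn" ).off( "click.ui" );       // removes only the "ui"-namespaced handler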
- if ( !event.rnamespace || handleObj.namespace === false || - event.rnamespace.test( handleObj.namespace ) ) { - - event.handleObj = handleObj; - event.data = handleObj.data; - - ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || - handleObj.handler ).apply( matched.elem, args ); - - if ( ret !== undefined ) { - if ( ( event.result = ret ) === false ) { - event.preventDefault(); - event.stopPropagation(); - } - } - } - } - } - - // Call the postDispatch hook for the mapped type - if ( special.postDispatch ) { - special.postDispatch.call( this, event ); - } - - return event.result; - }, - - handlers: function( event, handlers ) { - var i, handleObj, sel, matchedHandlers, matchedSelectors, - handlerQueue = [], - delegateCount = handlers.delegateCount, - cur = event.target; - - // Find delegate handlers - if ( delegateCount && - - // Support: IE <=9 - // Black-hole SVG instance trees (trac-13180) - cur.nodeType && - - // Support: Firefox <=42 - // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) - // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click - // Support: IE 11 only - // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) - !( event.type === "click" && event.button >= 1 ) ) { - - for ( ; cur !== this; cur = cur.parentNode || this ) { - - // Don't check non-elements (#13208) - // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) - if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { - matchedHandlers = []; - matchedSelectors = {}; - for ( i = 0; i < delegateCount; i++ ) { - handleObj = handlers[ i ]; - - // Don't conflict with Object.prototype properties (#13203) - sel = handleObj.selector + " "; - - if ( matchedSelectors[ sel ] === undefined ) { - matchedSelectors[ sel ] = handleObj.needsContext ? - jQuery( sel, this ).index( cur ) > -1 : - jQuery.find( sel, this, null, [ cur ] ).length; - } - if ( matchedSelectors[ sel ] ) { - matchedHandlers.push( handleObj ); - } - } - if ( matchedHandlers.length ) { - handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); - } - } - } - } - - // Add the remaining (directly-bound) handlers - cur = this; - if ( delegateCount < handlers.length ) { - handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); - } - - return handlerQueue; - }, - - addProp: function( name, hook ) { - Object.defineProperty( jQuery.Event.prototype, name, { - enumerable: true, - configurable: true, - - get: isFunction( hook ) ? - function() { - if ( this.originalEvent ) { - return hook( this.originalEvent ); - } - } : - function() { - if ( this.originalEvent ) { - return this.originalEvent[ name ]; - } - }, - - set: function( value ) { - Object.defineProperty( this, name, { - enumerable: true, - configurable: true, - writable: true, - value: value - } ); - } - } ); - }, - - fix: function( originalEvent ) { - return originalEvent[ jQuery.expando ] ? - originalEvent : - new jQuery.Event( originalEvent ); - }, - - special: { - load: { - - // Prevent triggered image.load events from bubbling to window.load - noBubble: true - }, - click: { - - // Utilize native event to ensure correct state for checkable inputs - setup: function( data ) { - - // For mutual compressibility with _default, replace `this` access with a local var. - // `|| data` is dead code meant only to preserve the variable through minification. 
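// Illustrative aside (editorial, not part of the jQuery source): the delegate
// walk implemented by jQuery.event.handlers above, seen from the API. "#list"
// and ".item" are hypothetical selectors; one listener on the container is
// matched against event.target's ancestors on each click.
$( "#list" ).on( "click", ".item", function( event ) {

	// `this` is the matched .item; event.delegateTarget is the #list element
	console.log( "delegated:", this.textContent, event.delegateTarget.id );
} );

// Delegated handleObjs sit at the front of the queue (handlers.delegateCount),
// so for a single click they run before handlers bound directly on #list.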
- var el = this || data; - - // Claim the first handler - if ( rcheckableType.test( el.type ) && - el.click && nodeName( el, "input" ) ) { - - // dataPriv.set( el, "click", ... ) - leverageNative( el, "click", returnTrue ); - } - - // Return false to allow normal processing in the caller - return false; - }, - trigger: function( data ) { - - // For mutual compressibility with _default, replace `this` access with a local var. - // `|| data` is dead code meant only to preserve the variable through minification. - var el = this || data; - - // Force setup before triggering a click - if ( rcheckableType.test( el.type ) && - el.click && nodeName( el, "input" ) ) { - - leverageNative( el, "click" ); - } - - // Return non-false to allow normal event-path propagation - return true; - }, - - // For cross-browser consistency, suppress native .click() on links - // Also prevent it if we're currently inside a leveraged native-event stack - _default: function( event ) { - var target = event.target; - return rcheckableType.test( target.type ) && - target.click && nodeName( target, "input" ) && - dataPriv.get( target, "click" ) || - nodeName( target, "a" ); - } - }, - - beforeunload: { - postDispatch: function( event ) { - - // Support: Firefox 20+ - // Firefox doesn't alert if the returnValue field is not set. - if ( event.result !== undefined && event.originalEvent ) { - event.originalEvent.returnValue = event.result; - } - } - } - } -}; - -// Ensure the presence of an event listener that handles manually-triggered -// synthetic events by interrupting progress until reinvoked in response to -// *native* events that it fires directly, ensuring that state changes have -// already occurred before other listeners are invoked. -function leverageNative( el, type, expectSync ) { - - // Missing expectSync indicates a trigger call, which must force setup through jQuery.event.add - if ( !expectSync ) { - if ( dataPriv.get( el, type ) === undefined ) { - jQuery.event.add( el, type, returnTrue ); - } - return; - } - - // Register the controller as a special universal handler for all event namespaces - dataPriv.set( el, type, false ); - jQuery.event.add( el, type, { - namespace: false, - handler: function( event ) { - var notAsync, result, - saved = dataPriv.get( this, type ); - - if ( ( event.isTrigger & 1 ) && this[ type ] ) { - - // Interrupt processing of the outer synthetic .trigger()ed event - // Saved data should be false in such cases, but might be a leftover capture object - // from an async native handler (gh-4350) - if ( !saved.length ) { - - // Store arguments for use when handling the inner native event - // There will always be at least one argument (an event object), so this array - // will not be confused with a leftover capture object. 
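// Illustrative aside (editorial, not part of the jQuery source): why this
// capture-and-replay dance exists, per the lead comment above. With a purely
// synthetic event, a checkbox handler would observe the *pre-click* state;
// routing through the native el.click() first means the state change has
// already happened when user handlers run. "#cb" is a hypothetical id.
var cb = $( "#cb" )[ 0 ];
$( cb ).on( "click", function() {
	console.log( "checked has already flipped:", cb.checked );
} );
$( cb ).trigger( "click" ); // replayed through the native click by this handler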
- saved = slice.call( arguments ); - dataPriv.set( this, type, saved ); - - // Trigger the native event and capture its result - // Support: IE <=9 - 11+ - // focus() and blur() are asynchronous - notAsync = expectSync( this, type ); - this[ type ](); - result = dataPriv.get( this, type ); - if ( saved !== result || notAsync ) { - dataPriv.set( this, type, false ); - } else { - result = {}; - } - if ( saved !== result ) { - - // Cancel the outer synthetic event - event.stopImmediatePropagation(); - event.preventDefault(); - return result.value; - } - - // If this is an inner synthetic event for an event with a bubbling surrogate - // (focus or blur), assume that the surrogate already propagated from triggering the - // native event and prevent that from happening again here. - // This technically gets the ordering wrong w.r.t. to `.trigger()` (in which the - // bubbling surrogate propagates *after* the non-bubbling base), but that seems - // less bad than duplication. - } else if ( ( jQuery.event.special[ type ] || {} ).delegateType ) { - event.stopPropagation(); - } - - // If this is a native event triggered above, everything is now in order - // Fire an inner synthetic event with the original arguments - } else if ( saved.length ) { - - // ...and capture the result - dataPriv.set( this, type, { - value: jQuery.event.trigger( - - // Support: IE <=9 - 11+ - // Extend with the prototype to reset the above stopImmediatePropagation() - jQuery.extend( saved[ 0 ], jQuery.Event.prototype ), - saved.slice( 1 ), - this - ) - } ); - - // Abort handling of the native event - event.stopImmediatePropagation(); - } - } - } ); -} - -jQuery.removeEvent = function( elem, type, handle ) { - - // This "if" is needed for plain objects - if ( elem.removeEventListener ) { - elem.removeEventListener( type, handle ); - } -}; - -jQuery.Event = function( src, props ) { - - // Allow instantiation without the 'new' keyword - if ( !( this instanceof jQuery.Event ) ) { - return new jQuery.Event( src, props ); - } - - // Event object - if ( src && src.type ) { - this.originalEvent = src; - this.type = src.type; - - // Events bubbling up the document may have been marked as prevented - // by a handler lower down the tree; reflect the correct value. - this.isDefaultPrevented = src.defaultPrevented || - src.defaultPrevented === undefined && - - // Support: Android <=2.3 only - src.returnValue === false ? - returnTrue : - returnFalse; - - // Create target properties - // Support: Safari <=6 - 7 only - // Target should not be a text node (#504, #13143) - this.target = ( src.target && src.target.nodeType === 3 ) ? 
- src.target.parentNode : - src.target; - - this.currentTarget = src.currentTarget; - this.relatedTarget = src.relatedTarget; - - // Event type - } else { - this.type = src; - } - - // Put explicitly provided properties onto the event object - if ( props ) { - jQuery.extend( this, props ); - } - - // Create a timestamp if incoming event doesn't have one - this.timeStamp = src && src.timeStamp || Date.now(); - - // Mark it as fixed - this[ jQuery.expando ] = true; -}; - -// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding -// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html -jQuery.Event.prototype = { - constructor: jQuery.Event, - isDefaultPrevented: returnFalse, - isPropagationStopped: returnFalse, - isImmediatePropagationStopped: returnFalse, - isSimulated: false, - - preventDefault: function() { - var e = this.originalEvent; - - this.isDefaultPrevented = returnTrue; - - if ( e && !this.isSimulated ) { - e.preventDefault(); - } - }, - stopPropagation: function() { - var e = this.originalEvent; - - this.isPropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopPropagation(); - } - }, - stopImmediatePropagation: function() { - var e = this.originalEvent; - - this.isImmediatePropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopImmediatePropagation(); - } - - this.stopPropagation(); - } -}; - -// Includes all common event props including KeyEvent and MouseEvent specific props -jQuery.each( { - altKey: true, - bubbles: true, - cancelable: true, - changedTouches: true, - ctrlKey: true, - detail: true, - eventPhase: true, - metaKey: true, - pageX: true, - pageY: true, - shiftKey: true, - view: true, - "char": true, - code: true, - charCode: true, - key: true, - keyCode: true, - button: true, - buttons: true, - clientX: true, - clientY: true, - offsetX: true, - offsetY: true, - pointerId: true, - pointerType: true, - screenX: true, - screenY: true, - targetTouches: true, - toElement: true, - touches: true, - - which: function( event ) { - var button = event.button; - - // Add which for key events - if ( event.which == null && rkeyEvent.test( event.type ) ) { - return event.charCode != null ? event.charCode : event.keyCode; - } - - // Add which for click: 1 === left; 2 === middle; 3 === right - if ( !event.which && button !== undefined && rmouseEvent.test( event.type ) ) { - if ( button & 1 ) { - return 1; - } - - if ( button & 2 ) { - return 3; - } - - if ( button & 4 ) { - return 2; - } - - return 0; - } - - return event.which; - } -}, jQuery.event.addProp ); - -jQuery.each( { focus: "focusin", blur: "focusout" }, function( type, delegateType ) { - jQuery.event.special[ type ] = { - - // Utilize native event if possible so blur/focus sequence is correct - setup: function() { - - // Claim the first handler - // dataPriv.set( this, "focus", ... ) - // dataPriv.set( this, "blur", ... ) - leverageNative( this, type, expectSync ); - - // Return false to allow normal processing in the caller - return false; - }, - trigger: function() { - - // Force setup before trigger - leverageNative( this, type ); - - // Return non-false to allow normal event-path propagation - return true; - }, - - delegateType: delegateType - }; -} ); - -// Create mouseenter/leave events using mouseover/out and event-time checks -// so that event delegation works in jQuery. 
-// Do the same for pointerenter/pointerleave and pointerover/pointerout -// -// Support: Safari 7 only -// Safari sends mouseenter too often; see: -// https://bugs.chromium.org/p/chromium/issues/detail?id=470258 -// for the description of the bug (it existed in older Chrome versions as well). -jQuery.each( { - mouseenter: "mouseover", - mouseleave: "mouseout", - pointerenter: "pointerover", - pointerleave: "pointerout" -}, function( orig, fix ) { - jQuery.event.special[ orig ] = { - delegateType: fix, - bindType: fix, - - handle: function( event ) { - var ret, - target = this, - related = event.relatedTarget, - handleObj = event.handleObj; - - // For mouseenter/leave call the handler if related is outside the target. - // NB: No relatedTarget if the mouse left/entered the browser window - if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) { - event.type = handleObj.origType; - ret = handleObj.handler.apply( this, arguments ); - event.type = fix; - } - return ret; - } - }; -} ); - -jQuery.fn.extend( { - - on: function( types, selector, data, fn ) { - return on( this, types, selector, data, fn ); - }, - one: function( types, selector, data, fn ) { - return on( this, types, selector, data, fn, 1 ); - }, - off: function( types, selector, fn ) { - var handleObj, type; - if ( types && types.preventDefault && types.handleObj ) { - - // ( event ) dispatched jQuery.Event - handleObj = types.handleObj; - jQuery( types.delegateTarget ).off( - handleObj.namespace ? - handleObj.origType + "." + handleObj.namespace : - handleObj.origType, - handleObj.selector, - handleObj.handler - ); - return this; - } - if ( typeof types === "object" ) { - - // ( types-object [, selector] ) - for ( type in types ) { - this.off( type, selector, types[ type ] ); - } - return this; - } - if ( selector === false || typeof selector === "function" ) { - - // ( types [, fn] ) - fn = selector; - selector = undefined; - } - if ( fn === false ) { - fn = returnFalse; - } - return this.each( function() { - jQuery.event.remove( this, types, fn, selector ); - } ); - } -} ); - - -var - - /* eslint-disable max-len */ - - // See https://github.com/eslint/eslint/issues/3229 - rxhtmlTag = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([a-z][^\/\0>\x20\t\r\n\f]*)[^>]*)\/>/gi, - - /* eslint-enable */ - - // Support: IE <=10 - 11, Edge 12 - 13 only - // In IE/Edge using regex groups here causes severe slowdowns. - // See https://connect.microsoft.com/IE/feedback/details/1736512/ - rnoInnerhtml = /<script|<style|<link/i, - - // checked="checked" or checked - rchecked = /checked\s*(?:[^=]|=\s*.checked.)/i, - rcleanScript = /^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g; - -// Prefer a tbody over its parent table for containing new rows -function manipulationTarget( elem, content ) { - if ( nodeName( elem, "table" ) && - nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ) { - - return jQuery( elem ).children( "tbody" )[ 0 ] || elem; - } - - return elem; -} - -// Replace/restore the type attribute of script elements for safe DOM manipulation -function disableScript( elem ) { - elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type; - return elem; -} -function restoreScript( elem ) { - if ( ( elem.type || "" ).slice( 0, 5 ) === "true/" ) { - elem.type = elem.type.slice( 5 ); - } else { - elem.removeAttribute( "type" ); - } - - return elem; -} - -function cloneCopyEvent( src, dest ) { - var i, l, type, pdataOld, pdataCur, udataOld, udataCur, events; - - if ( dest.nodeType !== 1 ) { - return; - } - - // 1. Copy private data: events, handlers, etc.
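// Illustrative aside (editorial, not part of the jQuery source): what the
// private-data copy below enables at the API level. .clone( true ) re-adds
// each copied handleObj on the clone via the jQuery.event.add loop that
// follows this comment.
var $orig = $( "<button>hi</button>" ).on( "click", function() {
	console.log( "clicked" );
} );
var $copy = $orig.clone( true ); // true: copy data and events as well

$copy.appendTo( "body" ).trigger( "click" ); // logs "clicked"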
- if ( dataPriv.hasData( src ) ) { - pdataOld = dataPriv.access( src ); - pdataCur = dataPriv.set( dest, pdataOld ); - events = pdataOld.events; - - if ( events ) { - delete pdataCur.handle; - pdataCur.events = {}; - - for ( type in events ) { - for ( i = 0, l = events[ type ].length; i < l; i++ ) { - jQuery.event.add( dest, type, events[ type ][ i ] ); - } - } - } - } - - // 2. Copy user data - if ( dataUser.hasData( src ) ) { - udataOld = dataUser.access( src ); - udataCur = jQuery.extend( {}, udataOld ); - - dataUser.set( dest, udataCur ); - } -} - -// Fix IE bugs, see support tests -function fixInput( src, dest ) { - var nodeName = dest.nodeName.toLowerCase(); - - // Fails to persist the checked state of a cloned checkbox or radio button. - if ( nodeName === "input" && rcheckableType.test( src.type ) ) { - dest.checked = src.checked; - - // Fails to return the selected option to the default selected state when cloning options - } else if ( nodeName === "input" || nodeName === "textarea" ) { - dest.defaultValue = src.defaultValue; - } -} - -function domManip( collection, args, callback, ignored ) { - - // Flatten any nested arrays - args = concat.apply( [], args ); - - var fragment, first, scripts, hasScripts, node, doc, - i = 0, - l = collection.length, - iNoClone = l - 1, - value = args[ 0 ], - valueIsFunction = isFunction( value ); - - // We can't cloneNode fragments that contain checked, in WebKit - if ( valueIsFunction || - ( l > 1 && typeof value === "string" && - !support.checkClone && rchecked.test( value ) ) ) { - return collection.each( function( index ) { - var self = collection.eq( index ); - if ( valueIsFunction ) { - args[ 0 ] = value.call( this, index, self.html() ); - } - domManip( self, args, callback, ignored ); - } ); - } - - if ( l ) { - fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); - first = fragment.firstChild; - - if ( fragment.childNodes.length === 1 ) { - fragment = first; - } - - // Require either new content or an interest in ignored elements to invoke the callback - if ( first || ignored ) { - scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); - hasScripts = scripts.length; - - // Use the original fragment for the last item - // instead of the first because it can end up - // being emptied incorrectly in certain situations (#8070). 
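// Illustrative aside (editorial, not part of the jQuery source): the path
// through domManip above as seen from .append(). With several targets the
// built fragment is cloned for every element except the last (iNoClone), and
// a function-valued argument is re-evaluated once per element.
$( "li" ).append( function( index, oldHtml ) {

	// Called once per <li>; the returned string is parsed via buildFragment
	return " <em>#" + index + "</em>";
} );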
- for ( ; i < l; i++ ) { - node = fragment; - - if ( i !== iNoClone ) { - node = jQuery.clone( node, true, true ); - - // Keep references to cloned scripts for later restoration - if ( hasScripts ) { - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( scripts, getAll( node, "script" ) ); - } - } - - callback.call( collection[ i ], node, i ); - } - - if ( hasScripts ) { - doc = scripts[ scripts.length - 1 ].ownerDocument; - - // Reenable scripts - jQuery.map( scripts, restoreScript ); - - // Evaluate executable scripts on first document insertion - for ( i = 0; i < hasScripts; i++ ) { - node = scripts[ i ]; - if ( rscriptType.test( node.type || "" ) && - !dataPriv.access( node, "globalEval" ) && - jQuery.contains( doc, node ) ) { - - if ( node.src && ( node.type || "" ).toLowerCase() !== "module" ) { - - // Optional AJAX dependency, but won't run scripts if not present - if ( jQuery._evalUrl && !node.noModule ) { - jQuery._evalUrl( node.src, { - nonce: node.nonce || node.getAttribute( "nonce" ) - } ); - } - } else { - DOMEval( node.textContent.replace( rcleanScript, "" ), node, doc ); - } - } - } - } - } - } - - return collection; -} - -function remove( elem, selector, keepData ) { - var node, - nodes = selector ? jQuery.filter( selector, elem ) : elem, - i = 0; - - for ( ; ( node = nodes[ i ] ) != null; i++ ) { - if ( !keepData && node.nodeType === 1 ) { - jQuery.cleanData( getAll( node ) ); - } - - if ( node.parentNode ) { - if ( keepData && isAttached( node ) ) { - setGlobalEval( getAll( node, "script" ) ); - } - node.parentNode.removeChild( node ); - } - } - - return elem; -} - -jQuery.extend( { - htmlPrefilter: function( html ) { - return html.replace( rxhtmlTag, "<$1></$2>" ); - }, - - clone: function( elem, dataAndEvents, deepDataAndEvents ) { - var i, l, srcElements, destElements, - clone = elem.cloneNode( true ), - inPage = isAttached( elem ); - - // Fix IE cloning issues - if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) && - !jQuery.isXMLDoc( elem ) ) { - - // We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2 - destElements = getAll( clone ); - srcElements = getAll( elem ); - - for ( i = 0, l = srcElements.length; i < l; i++ ) { - fixInput( srcElements[ i ], destElements[ i ] ); - } - } - - // Copy the events from the original to the clone - if ( dataAndEvents ) { - if ( deepDataAndEvents ) { - srcElements = srcElements || getAll( elem ); - destElements = destElements || getAll( clone ); - - for ( i = 0, l = srcElements.length; i < l; i++ ) { - cloneCopyEvent( srcElements[ i ], destElements[ i ] ); - } - } else { - cloneCopyEvent( elem, clone ); - } - } - - // Preserve script evaluation history - destElements = getAll( clone, "script" ); - if ( destElements.length > 0 ) { - setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); - } - - // Return the cloned set - return clone; - }, - - cleanData: function( elems ) { - var data, elem, type, - special = jQuery.event.special, - i = 0; - - for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) { - if ( acceptData( elem ) ) { - if ( ( data = elem[ dataPriv.expando ] ) ) { - if ( data.events ) { - for ( type in data.events ) { - if ( special[ type ] ) { - jQuery.event.remove( elem, type ); - - // This is a shortcut to avoid jQuery.event.remove's overhead - } else { - jQuery.removeEvent( elem, type, data.handle ); - } - } - } - - // Support: Chrome <=35 - 45+ - // Assign undefined instead of using 
delete, see Data#remove - elem[ dataPriv.expando ] = undefined; - } - if ( elem[ dataUser.expando ] ) { - - // Support: Chrome <=35 - 45+ - // Assign undefined instead of using delete, see Data#remove - elem[ dataUser.expando ] = undefined; - } - } - } - } -} ); - -jQuery.fn.extend( { - detach: function( selector ) { - return remove( this, selector, true ); - }, - - remove: function( selector ) { - return remove( this, selector ); - }, - - text: function( value ) { - return access( this, function( value ) { - return value === undefined ? - jQuery.text( this ) : - this.empty().each( function() { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - this.textContent = value; - } - } ); - }, null, value, arguments.length ); - }, - - append: function() { - return domManip( this, arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.appendChild( elem ); - } - } ); - }, - - prepend: function() { - return domManip( this, arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.insertBefore( elem, target.firstChild ); - } - } ); - }, - - before: function() { - return domManip( this, arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this ); - } - } ); - }, - - after: function() { - return domManip( this, arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this.nextSibling ); - } - } ); - }, - - empty: function() { - var elem, - i = 0; - - for ( ; ( elem = this[ i ] ) != null; i++ ) { - if ( elem.nodeType === 1 ) { - - // Prevent memory leaks - jQuery.cleanData( getAll( elem, false ) ); - - // Remove any remaining nodes - elem.textContent = ""; - } - } - - return this; - }, - - clone: function( dataAndEvents, deepDataAndEvents ) { - dataAndEvents = dataAndEvents == null ? false : dataAndEvents; - deepDataAndEvents = deepDataAndEvents == null ? 
dataAndEvents : deepDataAndEvents; - - return this.map( function() { - return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); - } ); - }, - - html: function( value ) { - return access( this, function( value ) { - var elem = this[ 0 ] || {}, - i = 0, - l = this.length; - - if ( value === undefined && elem.nodeType === 1 ) { - return elem.innerHTML; - } - - // See if we can take a shortcut and just use innerHTML - if ( typeof value === "string" && !rnoInnerhtml.test( value ) && - !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { - - value = jQuery.htmlPrefilter( value ); - - try { - for ( ; i < l; i++ ) { - elem = this[ i ] || {}; - - // Remove element nodes and prevent memory leaks - if ( elem.nodeType === 1 ) { - jQuery.cleanData( getAll( elem, false ) ); - elem.innerHTML = value; - } - } - - elem = 0; - - // If using innerHTML throws an exception, use the fallback method - } catch ( e ) {} - } - - if ( elem ) { - this.empty().append( value ); - } - }, null, value, arguments.length ); - }, - - replaceWith: function() { - var ignored = []; - - // Make the changes, replacing each non-ignored context element with the new content - return domManip( this, arguments, function( elem ) { - var parent = this.parentNode; - - if ( jQuery.inArray( this, ignored ) < 0 ) { - jQuery.cleanData( getAll( this ) ); - if ( parent ) { - parent.replaceChild( elem, this ); - } - } - - // Force callback invocation - }, ignored ); - } -} ); - -jQuery.each( { - appendTo: "append", - prependTo: "prepend", - insertBefore: "before", - insertAfter: "after", - replaceAll: "replaceWith" -}, function( name, original ) { - jQuery.fn[ name ] = function( selector ) { - var elems, - ret = [], - insert = jQuery( selector ), - last = insert.length - 1, - i = 0; - - for ( ; i <= last; i++ ) { - elems = i === last ? this : this.clone( true ); - jQuery( insert[ i ] )[ original ]( elems ); - - // Support: Android <=4.0 only, PhantomJS 1 only - // .get() because push.apply(_, arraylike) throws on ancient WebKit - push.apply( ret, elems.get() ); - } - - return this.pushStack( ret ); - }; -} ); -var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); - -var getStyles = function( elem ) { - - // Support: IE <=11 only, Firefox <=30 (#15098, #14150) - // IE throws on elements created in popups - // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" - var view = elem.ownerDocument.defaultView; - - if ( !view || !view.opener ) { - view = window; - } - - return view.getComputedStyle( elem ); - }; - -var rboxStyle = new RegExp( cssExpand.join( "|" ), "i" ); - - - -( function() { - - // Executing both pixelPosition & boxSizingReliable tests require only one layout - // so they're executed at the same time to save the second computation. 
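// Illustrative aside (editorial, not part of the jQuery source): the shape of
// the lazy support getters defined below, reduced to a sketch. Each
// support.*() call funnels into computeStyleTests(), which runs the layout
// probe once, caches the results in closure variables, and nulls `div` as the
// "already done" marker. All names here are hypothetical.
function makeLazyProbe( runProbe ) {
	var done = false,
		result;
	return function() {
		if ( !done ) {
			result = runProbe(); // one forced layout, on first call only
			done = true;
		}
		return result;
	};
}
var pixelPositionProbe = makeLazyProbe( function() { return true; } );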
- function computeStyleTests() { - - // This is a singleton, we need to execute it only once - if ( !div ) { - return; - } - - container.style.cssText = "position:absolute;left:-11111px;width:60px;" + - "margin-top:1px;padding:0;border:0"; - div.style.cssText = - "position:relative;display:block;box-sizing:border-box;overflow:scroll;" + - "margin:auto;border:1px;padding:1px;" + - "width:60%;top:1%"; - documentElement.appendChild( container ).appendChild( div ); - - var divStyle = window.getComputedStyle( div ); - pixelPositionVal = divStyle.top !== "1%"; - - // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 - reliableMarginLeftVal = roundPixelMeasures( divStyle.marginLeft ) === 12; - - // Support: Android 4.0 - 4.3 only, Safari <=9.1 - 10.1, iOS <=7.0 - 9.3 - // Some styles come back with percentage values, even though they shouldn't - div.style.right = "60%"; - pixelBoxStylesVal = roundPixelMeasures( divStyle.right ) === 36; - - // Support: IE 9 - 11 only - // Detect misreporting of content dimensions for box-sizing:border-box elements - boxSizingReliableVal = roundPixelMeasures( divStyle.width ) === 36; - - // Support: IE 9 only - // Detect overflow:scroll screwiness (gh-3699) - // Support: Chrome <=64 - // Don't get tricked when zoom affects offsetWidth (gh-4029) - div.style.position = "absolute"; - scrollboxSizeVal = roundPixelMeasures( div.offsetWidth / 3 ) === 12; - - documentElement.removeChild( container ); - - // Nullify the div so it wouldn't be stored in the memory and - // it will also be a sign that checks already performed - div = null; - } - - function roundPixelMeasures( measure ) { - return Math.round( parseFloat( measure ) ); - } - - var pixelPositionVal, boxSizingReliableVal, scrollboxSizeVal, pixelBoxStylesVal, - reliableMarginLeftVal, - container = document.createElement( "div" ), - div = document.createElement( "div" ); - - // Finish early in limited (non-browser) environments - if ( !div.style ) { - return; - } - - // Support: IE <=9 - 11 only - // Style of cloned element affects source element cloned (#8908) - div.style.backgroundClip = "content-box"; - div.cloneNode( true ).style.backgroundClip = ""; - support.clearCloneStyle = div.style.backgroundClip === "content-box"; - - jQuery.extend( support, { - boxSizingReliable: function() { - computeStyleTests(); - return boxSizingReliableVal; - }, - pixelBoxStyles: function() { - computeStyleTests(); - return pixelBoxStylesVal; - }, - pixelPosition: function() { - computeStyleTests(); - return pixelPositionVal; - }, - reliableMarginLeft: function() { - computeStyleTests(); - return reliableMarginLeftVal; - }, - scrollboxSize: function() { - computeStyleTests(); - return scrollboxSizeVal; - } - } ); -} )(); - - -function curCSS( elem, name, computed ) { - var width, minWidth, maxWidth, ret, - - // Support: Firefox 51+ - // Retrieving style before computed somehow - // fixes an issue with getting wrong values - // on detached elements - style = elem.style; - - computed = computed || getStyles( elem ); - - // getPropertyValue is needed for: - // .css('filter') (IE 9 only, #12537) - // .css('--customProperty) (#3144) - if ( computed ) { - ret = computed.getPropertyValue( name ) || computed[ name ]; - - if ( ret === "" && !isAttached( elem ) ) { - ret = jQuery.style( elem, name ); - } - - // A tribute to the "awesome hack by Dean Edwards" - // Android Browser returns percentage for some values, - // but width seems to be reliably pixels. 
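// Illustrative aside (editorial, not part of the jQuery source): the hack in
// isolation. Suppose the computed value comes back as a percentage:
//
//     ret = computed.getPropertyValue( name );            // e.g. "50%"
//     style.minWidth = style.maxWidth = style.width = ret;
//     ret = computed.width;                               // e.g. "120px"
//
// Reading `computed.width` back forces the engine to report pixels; the
// original inline width/minWidth/maxWidth values are then restored below.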
- // This is against the CSSOM draft spec: - // https://drafts.csswg.org/cssom/#resolved-values - if ( !support.pixelBoxStyles() && rnumnonpx.test( ret ) && rboxStyle.test( name ) ) { - - // Remember the original values - width = style.width; - minWidth = style.minWidth; - maxWidth = style.maxWidth; - - // Put in the new values to get a computed value out - style.minWidth = style.maxWidth = style.width = ret; - ret = computed.width; - - // Revert the changed values - style.width = width; - style.minWidth = minWidth; - style.maxWidth = maxWidth; - } - } - - return ret !== undefined ? - - // Support: IE <=9 - 11 only - // IE returns zIndex value as an integer. - ret + "" : - ret; -} - - -function addGetHookIf( conditionFn, hookFn ) { - - // Define the hook, we'll check on the first run if it's really needed. - return { - get: function() { - if ( conditionFn() ) { - - // Hook not needed (or it's not possible to use it due - // to missing dependency), remove it. - delete this.get; - return; - } - - // Hook needed; redefine it so that the support test is not executed again. - return ( this.get = hookFn ).apply( this, arguments ); - } - }; -} - - -var cssPrefixes = [ "Webkit", "Moz", "ms" ], - emptyStyle = document.createElement( "div" ).style, - vendorProps = {}; - -// Return a vendor-prefixed property or undefined -function vendorPropName( name ) { - - // Check for vendor prefixed names - var capName = name[ 0 ].toUpperCase() + name.slice( 1 ), - i = cssPrefixes.length; - - while ( i-- ) { - name = cssPrefixes[ i ] + capName; - if ( name in emptyStyle ) { - return name; - } - } -} - -// Return a potentially-mapped jQuery.cssProps or vendor prefixed property -function finalPropName( name ) { - var final = jQuery.cssProps[ name ] || vendorProps[ name ]; - - if ( final ) { - return final; - } - if ( name in emptyStyle ) { - return name; - } - return vendorProps[ name ] = vendorPropName( name ) || name; -} - - -var - - // Swappable if display is none or starts with table - // except "table", "table-cell", or "table-caption" - // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display - rdisplayswap = /^(none|table(?!-c[ea]).+)/, - rcustomProp = /^--/, - cssShow = { position: "absolute", visibility: "hidden", display: "block" }, - cssNormalTransform = { - letterSpacing: "0", - fontWeight: "400" - }; - -function setPositiveNumber( elem, value, subtract ) { - - // Any relative (+/-) values have already been - // normalized at this point - var matches = rcssNum.exec( value ); - return matches ? - - // Guard against undefined "subtract", e.g., when used as in cssHooks - Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) : - value; -} - -function boxModelAdjustment( elem, dimension, box, isBorderBox, styles, computedVal ) { - var i = dimension === "width" ? 1 : 0, - extra = 0, - delta = 0; - - // Adjustment may not be necessary - if ( box === ( isBorderBox ? 
"border" : "content" ) ) { - return 0; - } - - for ( ; i < 4; i += 2 ) { - - // Both box models exclude margin - if ( box === "margin" ) { - delta += jQuery.css( elem, box + cssExpand[ i ], true, styles ); - } - - // If we get here with a content-box, we're seeking "padding" or "border" or "margin" - if ( !isBorderBox ) { - - // Add padding - delta += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - - // For "border" or "margin", add border - if ( box !== "padding" ) { - delta += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - - // But still keep track of it otherwise - } else { - extra += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - - // If we get here with a border-box (content + padding + border), we're seeking "content" or - // "padding" or "margin" - } else { - - // For "content", subtract padding - if ( box === "content" ) { - delta -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - } - - // For "content" or "padding", subtract border - if ( box !== "margin" ) { - delta -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - } - } - - // Account for positive content-box scroll gutter when requested by providing computedVal - if ( !isBorderBox && computedVal >= 0 ) { - - // offsetWidth/offsetHeight is a rounded sum of content, padding, scroll gutter, and border - // Assuming integer scroll gutter, subtract the rest and round down - delta += Math.max( 0, Math.ceil( - elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - - computedVal - - delta - - extra - - 0.5 - - // If offsetWidth/offsetHeight is unknown, then we can't determine content-box scroll gutter - // Use an explicit zero to avoid NaN (gh-3964) - ) ) || 0; - } - - return delta; -} - -function getWidthOrHeight( elem, dimension, extra ) { - - // Start with computed style - var styles = getStyles( elem ), - - // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-4322). - // Fake content-box until we know it's needed to know the true value. - boxSizingNeeded = !support.boxSizingReliable() || extra, - isBorderBox = boxSizingNeeded && - jQuery.css( elem, "boxSizing", false, styles ) === "border-box", - valueIsBorderBox = isBorderBox, - - val = curCSS( elem, dimension, styles ), - offsetProp = "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ); - - // Support: Firefox <=54 - // Return a confounding non-pixel value or feign ignorance, as appropriate. - if ( rnumnonpx.test( val ) ) { - if ( !extra ) { - return val; - } - val = "auto"; - } - - - // Fall back to offsetWidth/offsetHeight when value is "auto" - // This happens for inline elements with no explicit setting (gh-3571) - // Support: Android <=4.1 - 4.3 only - // Also use offsetWidth/offsetHeight for misreported inline dimensions (gh-3602) - // Support: IE 9-11 only - // Also use offsetWidth/offsetHeight for when box sizing is unreliable - // We use getClientRects() to check for hidden/disconnected. - // In those cases, the computed value can be trusted to be border-box - if ( ( !support.boxSizingReliable() && isBorderBox || - val === "auto" || - !parseFloat( val ) && jQuery.css( elem, "display", false, styles ) === "inline" ) && - elem.getClientRects().length ) { - - isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; - - // Where available, offsetWidth/offsetHeight approximate border box dimensions. 
- // Where not available (e.g., SVG), assume unreliable box-sizing and interpret the - // retrieved value as a content box dimension. - valueIsBorderBox = offsetProp in elem; - if ( valueIsBorderBox ) { - val = elem[ offsetProp ]; - } - } - - // Normalize "" and auto - val = parseFloat( val ) || 0; - - // Adjust for the element's box model - return ( val + - boxModelAdjustment( - elem, - dimension, - extra || ( isBorderBox ? "border" : "content" ), - valueIsBorderBox, - styles, - - // Provide the current computed size to request scroll gutter calculation (gh-3589) - val - ) - ) + "px"; -} - -jQuery.extend( { - - // Add in style property hooks for overriding the default - // behavior of getting and setting a style property - cssHooks: { - opacity: { - get: function( elem, computed ) { - if ( computed ) { - - // We should always get a number back from opacity - var ret = curCSS( elem, "opacity" ); - return ret === "" ? "1" : ret; - } - } - } - }, - - // Don't automatically add "px" to these possibly-unitless properties - cssNumber: { - "animationIterationCount": true, - "columnCount": true, - "fillOpacity": true, - "flexGrow": true, - "flexShrink": true, - "fontWeight": true, - "gridArea": true, - "gridColumn": true, - "gridColumnEnd": true, - "gridColumnStart": true, - "gridRow": true, - "gridRowEnd": true, - "gridRowStart": true, - "lineHeight": true, - "opacity": true, - "order": true, - "orphans": true, - "widows": true, - "zIndex": true, - "zoom": true - }, - - // Add in properties whose names you wish to fix before - // setting or getting the value - cssProps: {}, - - // Get and set the style property on a DOM Node - style: function( elem, name, value, extra ) { - - // Don't set styles on text and comment nodes - if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { - return; - } - - // Make sure that we're working with the right name - var ret, type, hooks, - origName = camelCase( name ), - isCustomProp = rcustomProp.test( name ), - style = elem.style; - - // Make sure that we're working with the right name. We don't - // want to query the value if it is a CSS custom property - // since they are user-defined. - if ( !isCustomProp ) { - name = finalPropName( origName ); - } - - // Gets hook for the prefixed version, then unprefixed version - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // Check if we're setting a value - if ( value !== undefined ) { - type = typeof value; - - // Convert "+=" or "-=" to relative numbers (#7345) - if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { - value = adjustCSS( elem, name, ret ); - - // Fixes bug #9237 - type = "number"; - } - - // Make sure that null and NaN values aren't set (#7116) - if ( value == null || value !== value ) { - return; - } - - // If a number was passed in, add the unit (except for certain CSS properties) - // The isCustomProp check can be removed in jQuery 4.0 when we only auto-append - // "px" to a few hardcoded values. - if ( type === "number" && !isCustomProp ) { - value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? 
"" : "px" ); - } - - // background-* props affect original clone's values - if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { - style[ name ] = "inherit"; - } - - // If a hook was provided, use that value, otherwise just set the specified value - if ( !hooks || !( "set" in hooks ) || - ( value = hooks.set( elem, value, extra ) ) !== undefined ) { - - if ( isCustomProp ) { - style.setProperty( name, value ); - } else { - style[ name ] = value; - } - } - - } else { - - // If a hook was provided get the non-computed value from there - if ( hooks && "get" in hooks && - ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { - - return ret; - } - - // Otherwise just get the value from the style object - return style[ name ]; - } - }, - - css: function( elem, name, extra, styles ) { - var val, num, hooks, - origName = camelCase( name ), - isCustomProp = rcustomProp.test( name ); - - // Make sure that we're working with the right name. We don't - // want to modify the value if it is a CSS custom property - // since they are user-defined. - if ( !isCustomProp ) { - name = finalPropName( origName ); - } - - // Try prefixed name followed by the unprefixed name - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // If a hook was provided get the computed value from there - if ( hooks && "get" in hooks ) { - val = hooks.get( elem, true, extra ); - } - - // Otherwise, if a way to get the computed value exists, use that - if ( val === undefined ) { - val = curCSS( elem, name, styles ); - } - - // Convert "normal" to computed value - if ( val === "normal" && name in cssNormalTransform ) { - val = cssNormalTransform[ name ]; - } - - // Make numeric if forced or a qualifier was provided and val looks numeric - if ( extra === "" || extra ) { - num = parseFloat( val ); - return extra === true || isFinite( num ) ? num || 0 : val; - } - - return val; - } -} ); - -jQuery.each( [ "height", "width" ], function( i, dimension ) { - jQuery.cssHooks[ dimension ] = { - get: function( elem, computed, extra ) { - if ( computed ) { - - // Certain elements can have dimension info if we invisibly show them - // but it must have a current display style that would benefit - return rdisplayswap.test( jQuery.css( elem, "display" ) ) && - - // Support: Safari 8+ - // Table columns in Safari have non-zero offsetWidth & zero - // getBoundingClientRect().width unless display is changed. - // Support: IE <=11 only - // Running getBoundingClientRect on a disconnected node - // in IE throws an error. - ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? - swap( elem, cssShow, function() { - return getWidthOrHeight( elem, dimension, extra ); - } ) : - getWidthOrHeight( elem, dimension, extra ); - } - }, - - set: function( elem, value, extra ) { - var matches, - styles = getStyles( elem ), - - // Only read styles.position if the test has a chance to fail - // to avoid forcing a reflow. - scrollboxSizeBuggy = !support.scrollboxSize() && - styles.position === "absolute", - - // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-3991) - boxSizingNeeded = scrollboxSizeBuggy || extra, - isBorderBox = boxSizingNeeded && - jQuery.css( elem, "boxSizing", false, styles ) === "border-box", - subtract = extra ? 
- boxModelAdjustment( - elem, - dimension, - extra, - isBorderBox, - styles - ) : - 0; - - // Account for unreliable border-box dimensions by comparing offset* to computed and - // faking a content-box to get border and padding (gh-3699) - if ( isBorderBox && scrollboxSizeBuggy ) { - subtract -= Math.ceil( - elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - - parseFloat( styles[ dimension ] ) - - boxModelAdjustment( elem, dimension, "border", false, styles ) - - 0.5 - ); - } - - // Convert to pixels if value adjustment is needed - if ( subtract && ( matches = rcssNum.exec( value ) ) && - ( matches[ 3 ] || "px" ) !== "px" ) { - - elem.style[ dimension ] = value; - value = jQuery.css( elem, dimension ); - } - - return setPositiveNumber( elem, value, subtract ); - } - }; -} ); - -jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, - function( elem, computed ) { - if ( computed ) { - return ( parseFloat( curCSS( elem, "marginLeft" ) ) || - elem.getBoundingClientRect().left - - swap( elem, { marginLeft: 0 }, function() { - return elem.getBoundingClientRect().left; - } ) - ) + "px"; - } - } -); - -// These hooks are used by animate to expand properties -jQuery.each( { - margin: "", - padding: "", - border: "Width" -}, function( prefix, suffix ) { - jQuery.cssHooks[ prefix + suffix ] = { - expand: function( value ) { - var i = 0, - expanded = {}, - - // Assumes a single number if not a string - parts = typeof value === "string" ? value.split( " " ) : [ value ]; - - for ( ; i < 4; i++ ) { - expanded[ prefix + cssExpand[ i ] + suffix ] = - parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; - } - - return expanded; - } - }; - - if ( prefix !== "margin" ) { - jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; - } -} ); - -jQuery.fn.extend( { - css: function( name, value ) { - return access( this, function( elem, name, value ) { - var styles, len, - map = {}, - i = 0; - - if ( Array.isArray( name ) ) { - styles = getStyles( elem ); - len = name.length; - - for ( ; i < len; i++ ) { - map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); - } - - return map; - } - - return value !== undefined ? - jQuery.style( elem, name, value ) : - jQuery.css( elem, name ); - }, name, value, arguments.length > 1 ); - } -} ); - - -function Tween( elem, options, prop, end, easing ) { - return new Tween.prototype.init( elem, options, prop, end, easing ); -} -jQuery.Tween = Tween; - -Tween.prototype = { - constructor: Tween, - init: function( elem, options, prop, end, easing, unit ) { - this.elem = elem; - this.prop = prop; - this.easing = easing || jQuery.easing._default; - this.options = options; - this.start = this.now = this.cur(); - this.end = end; - this.unit = unit || ( jQuery.cssNumber[ prop ] ? "" : "px" ); - }, - cur: function() { - var hooks = Tween.propHooks[ this.prop ]; - - return hooks && hooks.get ? 
- hooks.get( this ) : - Tween.propHooks._default.get( this ); - }, - run: function( percent ) { - var eased, - hooks = Tween.propHooks[ this.prop ]; - - if ( this.options.duration ) { - this.pos = eased = jQuery.easing[ this.easing ]( - percent, this.options.duration * percent, 0, 1, this.options.duration - ); - } else { - this.pos = eased = percent; - } - this.now = ( this.end - this.start ) * eased + this.start; - - if ( this.options.step ) { - this.options.step.call( this.elem, this.now, this ); - } - - if ( hooks && hooks.set ) { - hooks.set( this ); - } else { - Tween.propHooks._default.set( this ); - } - return this; - } -}; - -Tween.prototype.init.prototype = Tween.prototype; - -Tween.propHooks = { - _default: { - get: function( tween ) { - var result; - - // Use a property on the element directly when it is not a DOM element, - // or when there is no matching style property that exists. - if ( tween.elem.nodeType !== 1 || - tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { - return tween.elem[ tween.prop ]; - } - - // Passing an empty string as a 3rd parameter to .css will automatically - // attempt a parseFloat and fallback to a string if the parse fails. - // Simple values such as "10px" are parsed to Float; - // complex values such as "rotate(1rad)" are returned as-is. - result = jQuery.css( tween.elem, tween.prop, "" ); - - // Empty strings, null, undefined and "auto" are converted to 0. - return !result || result === "auto" ? 0 : result; - }, - set: function( tween ) { - - // Use step hook for back compat. - // Use cssHook if its there. - // Use .style if available and use plain properties where available. - if ( jQuery.fx.step[ tween.prop ] ) { - jQuery.fx.step[ tween.prop ]( tween ); - } else if ( tween.elem.nodeType === 1 && ( - jQuery.cssHooks[ tween.prop ] || - tween.elem.style[ finalPropName( tween.prop ) ] != null ) ) { - jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); - } else { - tween.elem[ tween.prop ] = tween.now; - } - } - } -}; - -// Support: IE <=9 only -// Panic based approach to setting things on disconnected nodes -Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { - set: function( tween ) { - if ( tween.elem.nodeType && tween.elem.parentNode ) { - tween.elem[ tween.prop ] = tween.now; - } - } -}; - -jQuery.easing = { - linear: function( p ) { - return p; - }, - swing: function( p ) { - return 0.5 - Math.cos( p * Math.PI ) / 2; - }, - _default: "swing" -}; - -jQuery.fx = Tween.prototype.init; - -// Back compat <1.8 extension point -jQuery.fx.step = {}; - - - - -var - fxNow, inProgress, - rfxtypes = /^(?:toggle|show|hide)$/, - rrun = /queueHooks$/; - -function schedule() { - if ( inProgress ) { - if ( document.hidden === false && window.requestAnimationFrame ) { - window.requestAnimationFrame( schedule ); - } else { - window.setTimeout( schedule, jQuery.fx.interval ); - } - - jQuery.fx.tick(); - } -} - -// Animations created synchronously will run synchronously -function createFxNow() { - window.setTimeout( function() { - fxNow = undefined; - } ); - return ( fxNow = Date.now() ); -} - -// Generate parameters to create a standard animation -function genFx( type, includeWidth ) { - var which, - i = 0, - attrs = { height: type }; - - // If we include width, step value is 1 to do all cssExpand values, - // otherwise step value is 2 to skip over Left and Right - includeWidth = includeWidth ? 
1 : 0; - for ( ; i < 4; i += 2 - includeWidth ) { - which = cssExpand[ i ]; - attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; - } - - if ( includeWidth ) { - attrs.opacity = attrs.width = type; - } - - return attrs; -} - -function createTween( value, prop, animation ) { - var tween, - collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), - index = 0, - length = collection.length; - for ( ; index < length; index++ ) { - if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { - - // We're done with this property - return tween; - } - } -} - -function defaultPrefilter( elem, props, opts ) { - var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, - isBox = "width" in props || "height" in props, - anim = this, - orig = {}, - style = elem.style, - hidden = elem.nodeType && isHiddenWithinTree( elem ), - dataShow = dataPriv.get( elem, "fxshow" ); - - // Queue-skipping animations hijack the fx hooks - if ( !opts.queue ) { - hooks = jQuery._queueHooks( elem, "fx" ); - if ( hooks.unqueued == null ) { - hooks.unqueued = 0; - oldfire = hooks.empty.fire; - hooks.empty.fire = function() { - if ( !hooks.unqueued ) { - oldfire(); - } - }; - } - hooks.unqueued++; - - anim.always( function() { - - // Ensure the complete handler is called before this completes - anim.always( function() { - hooks.unqueued--; - if ( !jQuery.queue( elem, "fx" ).length ) { - hooks.empty.fire(); - } - } ); - } ); - } - - // Detect show/hide animations - for ( prop in props ) { - value = props[ prop ]; - if ( rfxtypes.test( value ) ) { - delete props[ prop ]; - toggle = toggle || value === "toggle"; - if ( value === ( hidden ? "hide" : "show" ) ) { - - // Pretend to be hidden if this is a "show" and - // there is still data from a stopped show/hide - if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { - hidden = true; - - // Ignore all other no-op show/hide data - } else { - continue; - } - } - orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); - } - } - - // Bail out if this is a no-op like .hide().hide() - propTween = !jQuery.isEmptyObject( props ); - if ( !propTween && jQuery.isEmptyObject( orig ) ) { - return; - } - - // Restrict "overflow" and "display" styles during box animations - if ( isBox && elem.nodeType === 1 ) { - - // Support: IE <=9 - 11, Edge 12 - 15 - // Record all 3 overflow attributes because IE does not infer the shorthand - // from identically-valued overflowX and overflowY and Edge just mirrors - // the overflowX value there. 
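// Illustrative aside (editorial, not part of the jQuery source): what genFx()
// above hands to a height-only animation such as slideDown ("show" with
// includeWidth falsy steps by 2, covering only the Top/Bottom edges):
//
//     genFx( "show" ) ~> {
//         height: "show",
//         marginTop: "show", paddingTop: "show",
//         marginBottom: "show", paddingBottom: "show"
//     }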
- opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; - - // Identify a display type, preferring old show/hide data over the CSS cascade - restoreDisplay = dataShow && dataShow.display; - if ( restoreDisplay == null ) { - restoreDisplay = dataPriv.get( elem, "display" ); - } - display = jQuery.css( elem, "display" ); - if ( display === "none" ) { - if ( restoreDisplay ) { - display = restoreDisplay; - } else { - - // Get nonempty value(s) by temporarily forcing visibility - showHide( [ elem ], true ); - restoreDisplay = elem.style.display || restoreDisplay; - display = jQuery.css( elem, "display" ); - showHide( [ elem ] ); - } - } - - // Animate inline elements as inline-block - if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { - if ( jQuery.css( elem, "float" ) === "none" ) { - - // Restore the original display value at the end of pure show/hide animations - if ( !propTween ) { - anim.done( function() { - style.display = restoreDisplay; - } ); - if ( restoreDisplay == null ) { - display = style.display; - restoreDisplay = display === "none" ? "" : display; - } - } - style.display = "inline-block"; - } - } - } - - if ( opts.overflow ) { - style.overflow = "hidden"; - anim.always( function() { - style.overflow = opts.overflow[ 0 ]; - style.overflowX = opts.overflow[ 1 ]; - style.overflowY = opts.overflow[ 2 ]; - } ); - } - - // Implement show/hide animations - propTween = false; - for ( prop in orig ) { - - // General show/hide setup for this element animation - if ( !propTween ) { - if ( dataShow ) { - if ( "hidden" in dataShow ) { - hidden = dataShow.hidden; - } - } else { - dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); - } - - // Store hidden/visible for toggle so `.stop().toggle()` "reverses" - if ( toggle ) { - dataShow.hidden = !hidden; - } - - // Show elements before animating them - if ( hidden ) { - showHide( [ elem ], true ); - } - - /* eslint-disable no-loop-func */ - - anim.done( function() { - - /* eslint-enable no-loop-func */ - - // The final step of a "hide" animation is actually hiding the element - if ( !hidden ) { - showHide( [ elem ] ); - } - dataPriv.remove( elem, "fxshow" ); - for ( prop in orig ) { - jQuery.style( elem, prop, orig[ prop ] ); - } - } ); - } - - // Per-property setup - propTween = createTween( hidden ? dataShow[ prop ] : 0, prop, anim ); - if ( !( prop in dataShow ) ) { - dataShow[ prop ] = propTween.start; - if ( hidden ) { - propTween.end = propTween.start; - propTween.start = 0; - } - } - } -} - -function propFilter( props, specialEasing ) { - var index, name, easing, value, hooks; - - // camelCase, specialEasing and expand cssHook pass - for ( index in props ) { - name = camelCase( index ); - easing = specialEasing[ name ]; - value = props[ index ]; - if ( Array.isArray( value ) ) { - easing = value[ 1 ]; - value = props[ index ] = value[ 0 ]; - } - - if ( index !== name ) { - props[ name ] = value; - delete props[ index ]; - } - - hooks = jQuery.cssHooks[ name ]; - if ( hooks && "expand" in hooks ) { - value = hooks.expand( value ); - delete props[ name ]; - - // Not quite $.extend, this won't overwrite existing keys. 
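// Illustrative aside (editorial, not part of the jQuery source): the expand
// step above applied to a shorthand animation property. The cssHooks defined
// earlier turn { padding: "10px 20px" } into four edge tweens:
//
//     { paddingTop: "10px", paddingRight: "20px",
//       paddingBottom: "10px", paddingLeft: "20px" }
//
// each inheriting the shorthand's easing via specialEasing.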
- // Reusing 'index' because we have the correct "name" - for ( index in value ) { - if ( !( index in props ) ) { - props[ index ] = value[ index ]; - specialEasing[ index ] = easing; - } - } - } else { - specialEasing[ name ] = easing; - } - } -} - -function Animation( elem, properties, options ) { - var result, - stopped, - index = 0, - length = Animation.prefilters.length, - deferred = jQuery.Deferred().always( function() { - - // Don't match elem in the :animated selector - delete tick.elem; - } ), - tick = function() { - if ( stopped ) { - return false; - } - var currentTime = fxNow || createFxNow(), - remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), - - // Support: Android 2.3 only - // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) - temp = remaining / animation.duration || 0, - percent = 1 - temp, - index = 0, - length = animation.tweens.length; - - for ( ; index < length; index++ ) { - animation.tweens[ index ].run( percent ); - } - - deferred.notifyWith( elem, [ animation, percent, remaining ] ); - - // If there's more to do, yield - if ( percent < 1 && length ) { - return remaining; - } - - // If this was an empty animation, synthesize a final progress notification - if ( !length ) { - deferred.notifyWith( elem, [ animation, 1, 0 ] ); - } - - // Resolve the animation and report its conclusion - deferred.resolveWith( elem, [ animation ] ); - return false; - }, - animation = deferred.promise( { - elem: elem, - props: jQuery.extend( {}, properties ), - opts: jQuery.extend( true, { - specialEasing: {}, - easing: jQuery.easing._default - }, options ), - originalProperties: properties, - originalOptions: options, - startTime: fxNow || createFxNow(), - duration: options.duration, - tweens: [], - createTween: function( prop, end ) { - var tween = jQuery.Tween( elem, animation.opts, prop, end, - animation.opts.specialEasing[ prop ] || animation.opts.easing ); - animation.tweens.push( tween ); - return tween; - }, - stop: function( gotoEnd ) { - var index = 0, - - // If we are going to the end, we want to run all the tweens - // otherwise we skip this part - length = gotoEnd ? 
animation.tweens.length : 0; - if ( stopped ) { - return this; - } - stopped = true; - for ( ; index < length; index++ ) { - animation.tweens[ index ].run( 1 ); - } - - // Resolve when we played the last frame; otherwise, reject - if ( gotoEnd ) { - deferred.notifyWith( elem, [ animation, 1, 0 ] ); - deferred.resolveWith( elem, [ animation, gotoEnd ] ); - } else { - deferred.rejectWith( elem, [ animation, gotoEnd ] ); - } - return this; - } - } ), - props = animation.props; - - propFilter( props, animation.opts.specialEasing ); - - for ( ; index < length; index++ ) { - result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); - if ( result ) { - if ( isFunction( result.stop ) ) { - jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = - result.stop.bind( result ); - } - return result; - } - } - - jQuery.map( props, createTween, animation ); - - if ( isFunction( animation.opts.start ) ) { - animation.opts.start.call( elem, animation ); - } - - // Attach callbacks from options - animation - .progress( animation.opts.progress ) - .done( animation.opts.done, animation.opts.complete ) - .fail( animation.opts.fail ) - .always( animation.opts.always ); - - jQuery.fx.timer( - jQuery.extend( tick, { - elem: elem, - anim: animation, - queue: animation.opts.queue - } ) - ); - - return animation; -} - -jQuery.Animation = jQuery.extend( Animation, { - - tweeners: { - "*": [ function( prop, value ) { - var tween = this.createTween( prop, value ); - adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); - return tween; - } ] - }, - - tweener: function( props, callback ) { - if ( isFunction( props ) ) { - callback = props; - props = [ "*" ]; - } else { - props = props.match( rnothtmlwhite ); - } - - var prop, - index = 0, - length = props.length; - - for ( ; index < length; index++ ) { - prop = props[ index ]; - Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; - Animation.tweeners[ prop ].unshift( callback ); - } - }, - - prefilters: [ defaultPrefilter ], - - prefilter: function( callback, prepend ) { - if ( prepend ) { - Animation.prefilters.unshift( callback ); - } else { - Animation.prefilters.push( callback ); - } - } -} ); - -jQuery.speed = function( speed, easing, fn ) { - var opt = speed && typeof speed === "object" ? 
jQuery.extend( {}, speed ) : { - complete: fn || !fn && easing || - isFunction( speed ) && speed, - duration: speed, - easing: fn && easing || easing && !isFunction( easing ) && easing - }; - - // Go to the end state if fx are off - if ( jQuery.fx.off ) { - opt.duration = 0; - - } else { - if ( typeof opt.duration !== "number" ) { - if ( opt.duration in jQuery.fx.speeds ) { - opt.duration = jQuery.fx.speeds[ opt.duration ]; - - } else { - opt.duration = jQuery.fx.speeds._default; - } - } - } - - // Normalize opt.queue - true/undefined/null -> "fx" - if ( opt.queue == null || opt.queue === true ) { - opt.queue = "fx"; - } - - // Queueing - opt.old = opt.complete; - - opt.complete = function() { - if ( isFunction( opt.old ) ) { - opt.old.call( this ); - } - - if ( opt.queue ) { - jQuery.dequeue( this, opt.queue ); - } - }; - - return opt; -}; - -jQuery.fn.extend( { - fadeTo: function( speed, to, easing, callback ) { - - // Show any hidden elements after setting opacity to 0 - return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() - - // Animate to the value specified - .end().animate( { opacity: to }, speed, easing, callback ); - }, - animate: function( prop, speed, easing, callback ) { - var empty = jQuery.isEmptyObject( prop ), - optall = jQuery.speed( speed, easing, callback ), - doAnimation = function() { - - // Operate on a copy of prop so per-property easing won't be lost - var anim = Animation( this, jQuery.extend( {}, prop ), optall ); - - // Empty animations, or finishing resolves immediately - if ( empty || dataPriv.get( this, "finish" ) ) { - anim.stop( true ); - } - }; - doAnimation.finish = doAnimation; - - return empty || optall.queue === false ? - this.each( doAnimation ) : - this.queue( optall.queue, doAnimation ); - }, - stop: function( type, clearQueue, gotoEnd ) { - var stopQueue = function( hooks ) { - var stop = hooks.stop; - delete hooks.stop; - stop( gotoEnd ); - }; - - if ( typeof type !== "string" ) { - gotoEnd = clearQueue; - clearQueue = type; - type = undefined; - } - if ( clearQueue && type !== false ) { - this.queue( type || "fx", [] ); - } - - return this.each( function() { - var dequeue = true, - index = type != null && type + "queueHooks", - timers = jQuery.timers, - data = dataPriv.get( this ); - - if ( index ) { - if ( data[ index ] && data[ index ].stop ) { - stopQueue( data[ index ] ); - } - } else { - for ( index in data ) { - if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { - stopQueue( data[ index ] ); - } - } - } - - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && - ( type == null || timers[ index ].queue === type ) ) { - - timers[ index ].anim.stop( gotoEnd ); - dequeue = false; - timers.splice( index, 1 ); - } - } - - // Start the next in the queue if the last step wasn't forced. - // Timers currently will call their complete callbacks, which - // will dequeue but only if they were gotoEnd. - if ( dequeue || !gotoEnd ) { - jQuery.dequeue( this, type ); - } - } ); - }, - finish: function( type ) { - if ( type !== false ) { - type = type || "fx"; - } - return this.each( function() { - var index, - data = dataPriv.get( this ), - queue = data[ type + "queue" ], - hooks = data[ type + "queueHooks" ], - timers = jQuery.timers, - length = queue ? 
queue.length : 0; - - // Enable finishing flag on private data - data.finish = true; - - // Empty the queue first - jQuery.queue( this, type, [] ); - - if ( hooks && hooks.stop ) { - hooks.stop.call( this, true ); - } - - // Look for any active animations, and finish them - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && timers[ index ].queue === type ) { - timers[ index ].anim.stop( true ); - timers.splice( index, 1 ); - } - } - - // Look for any animations in the old queue and finish them - for ( index = 0; index < length; index++ ) { - if ( queue[ index ] && queue[ index ].finish ) { - queue[ index ].finish.call( this ); - } - } - - // Turn off finishing flag - delete data.finish; - } ); - } -} ); - -jQuery.each( [ "toggle", "show", "hide" ], function( i, name ) { - var cssFn = jQuery.fn[ name ]; - jQuery.fn[ name ] = function( speed, easing, callback ) { - return speed == null || typeof speed === "boolean" ? - cssFn.apply( this, arguments ) : - this.animate( genFx( name, true ), speed, easing, callback ); - }; -} ); - -// Generate shortcuts for custom animations -jQuery.each( { - slideDown: genFx( "show" ), - slideUp: genFx( "hide" ), - slideToggle: genFx( "toggle" ), - fadeIn: { opacity: "show" }, - fadeOut: { opacity: "hide" }, - fadeToggle: { opacity: "toggle" } -}, function( name, props ) { - jQuery.fn[ name ] = function( speed, easing, callback ) { - return this.animate( props, speed, easing, callback ); - }; -} ); - -jQuery.timers = []; -jQuery.fx.tick = function() { - var timer, - i = 0, - timers = jQuery.timers; - - fxNow = Date.now(); - - for ( ; i < timers.length; i++ ) { - timer = timers[ i ]; - - // Run the timer and safely remove it when done (allowing for external removal) - if ( !timer() && timers[ i ] === timer ) { - timers.splice( i--, 1 ); - } - } - - if ( !timers.length ) { - jQuery.fx.stop(); - } - fxNow = undefined; -}; - -jQuery.fx.timer = function( timer ) { - jQuery.timers.push( timer ); - jQuery.fx.start(); -}; - -jQuery.fx.interval = 13; -jQuery.fx.start = function() { - if ( inProgress ) { - return; - } - - inProgress = true; - schedule(); -}; - -jQuery.fx.stop = function() { - inProgress = null; -}; - -jQuery.fx.speeds = { - slow: 600, - fast: 200, - - // Default speed - _default: 400 -}; - - -// Based off of the plugin by Clint Helfers, with permission. -// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ -jQuery.fn.delay = function( time, type ) { - time = jQuery.fx ? 
jQuery.fx.speeds[ time ] || time : time; - type = type || "fx"; - - return this.queue( type, function( next, hooks ) { - var timeout = window.setTimeout( next, time ); - hooks.stop = function() { - window.clearTimeout( timeout ); - }; - } ); -}; - - -( function() { - var input = document.createElement( "input" ), - select = document.createElement( "select" ), - opt = select.appendChild( document.createElement( "option" ) ); - - input.type = "checkbox"; - - // Support: Android <=4.3 only - // Default value for a checkbox should be "on" - support.checkOn = input.value !== ""; - - // Support: IE <=11 only - // Must access selectedIndex to make default options select - support.optSelected = opt.selected; - - // Support: IE <=11 only - // An input loses its value after becoming a radio - input = document.createElement( "input" ); - input.value = "t"; - input.type = "radio"; - support.radioValue = input.value === "t"; -} )(); - - -var boolHook, - attrHandle = jQuery.expr.attrHandle; - -jQuery.fn.extend( { - attr: function( name, value ) { - return access( this, jQuery.attr, name, value, arguments.length > 1 ); - }, - - removeAttr: function( name ) { - return this.each( function() { - jQuery.removeAttr( this, name ); - } ); - } -} ); - -jQuery.extend( { - attr: function( elem, name, value ) { - var ret, hooks, - nType = elem.nodeType; - - // Don't get/set attributes on text, comment and attribute nodes - if ( nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - // Fallback to prop when attributes are not supported - if ( typeof elem.getAttribute === "undefined" ) { - return jQuery.prop( elem, name, value ); - } - - // Attribute hooks are determined by the lowercase version - // Grab necessary hook if one is defined - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - hooks = jQuery.attrHooks[ name.toLowerCase() ] || - ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); - } - - if ( value !== undefined ) { - if ( value === null ) { - jQuery.removeAttr( elem, name ); - return; - } - - if ( hooks && "set" in hooks && - ( ret = hooks.set( elem, value, name ) ) !== undefined ) { - return ret; - } - - elem.setAttribute( name, value + "" ); - return value; - } - - if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { - return ret; - } - - ret = jQuery.find.attr( elem, name ); - - // Non-existent attributes return null, we normalize to undefined - return ret == null ? 
undefined : ret; - }, - - attrHooks: { - type: { - set: function( elem, value ) { - if ( !support.radioValue && value === "radio" && - nodeName( elem, "input" ) ) { - var val = elem.value; - elem.setAttribute( "type", value ); - if ( val ) { - elem.value = val; - } - return value; - } - } - } - }, - - removeAttr: function( elem, value ) { - var name, - i = 0, - - // Attribute names can contain non-HTML whitespace characters - // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 - attrNames = value && value.match( rnothtmlwhite ); - - if ( attrNames && elem.nodeType === 1 ) { - while ( ( name = attrNames[ i++ ] ) ) { - elem.removeAttribute( name ); - } - } - } -} ); - -// Hooks for boolean attributes -boolHook = { - set: function( elem, value, name ) { - if ( value === false ) { - - // Remove boolean attributes when set to false - jQuery.removeAttr( elem, name ); - } else { - elem.setAttribute( name, name ); - } - return name; - } -}; - -jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( i, name ) { - var getter = attrHandle[ name ] || jQuery.find.attr; - - attrHandle[ name ] = function( elem, name, isXML ) { - var ret, handle, - lowercaseName = name.toLowerCase(); - - if ( !isXML ) { - - // Avoid an infinite loop by temporarily removing this function from the getter - handle = attrHandle[ lowercaseName ]; - attrHandle[ lowercaseName ] = ret; - ret = getter( elem, name, isXML ) != null ? - lowercaseName : - null; - attrHandle[ lowercaseName ] = handle; - } - return ret; - }; -} ); - - - - -var rfocusable = /^(?:input|select|textarea|button)$/i, - rclickable = /^(?:a|area)$/i; - -jQuery.fn.extend( { - prop: function( name, value ) { - return access( this, jQuery.prop, name, value, arguments.length > 1 ); - }, - - removeProp: function( name ) { - return this.each( function() { - delete this[ jQuery.propFix[ name ] || name ]; - } ); - } -} ); - -jQuery.extend( { - prop: function( elem, name, value ) { - var ret, hooks, - nType = elem.nodeType; - - // Don't get/set properties on text, comment and attribute nodes - if ( nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - - // Fix name and attach hooks - name = jQuery.propFix[ name ] || name; - hooks = jQuery.propHooks[ name ]; - } - - if ( value !== undefined ) { - if ( hooks && "set" in hooks && - ( ret = hooks.set( elem, value, name ) ) !== undefined ) { - return ret; - } - - return ( elem[ name ] = value ); - } - - if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { - return ret; - } - - return elem[ name ]; - }, - - propHooks: { - tabIndex: { - get: function( elem ) { - - // Support: IE <=9 - 11 only - // elem.tabIndex doesn't always return the - // correct value when it hasn't been explicitly set - // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ - // Use proper attribute retrieval(#12072) - var tabindex = jQuery.find.attr( elem, "tabindex" ); - - if ( tabindex ) { - return parseInt( tabindex, 10 ); - } - - if ( - rfocusable.test( elem.nodeName ) || - rclickable.test( elem.nodeName ) && - elem.href - ) { - return 0; - } - - return -1; - } - } - }, - - propFix: { - "for": "htmlFor", - "class": "className" - } -} ); - -// Support: IE <=11 only -// Accessing the selectedIndex property -// forces the browser to respect setting selected -// on the option -// The getter ensures a default option is selected -// when in an 
optgroup -// eslint rule "no-unused-expressions" is disabled for this code -// since it considers such accessions noop -if ( !support.optSelected ) { - jQuery.propHooks.selected = { - get: function( elem ) { - - /* eslint no-unused-expressions: "off" */ - - var parent = elem.parentNode; - if ( parent && parent.parentNode ) { - parent.parentNode.selectedIndex; - } - return null; - }, - set: function( elem ) { - - /* eslint no-unused-expressions: "off" */ - - var parent = elem.parentNode; - if ( parent ) { - parent.selectedIndex; - - if ( parent.parentNode ) { - parent.parentNode.selectedIndex; - } - } - } - }; -} - -jQuery.each( [ - "tabIndex", - "readOnly", - "maxLength", - "cellSpacing", - "cellPadding", - "rowSpan", - "colSpan", - "useMap", - "frameBorder", - "contentEditable" -], function() { - jQuery.propFix[ this.toLowerCase() ] = this; -} ); - - - - - // Strip and collapse whitespace according to HTML spec - // https://infra.spec.whatwg.org/#strip-and-collapse-ascii-whitespace - function stripAndCollapse( value ) { - var tokens = value.match( rnothtmlwhite ) || []; - return tokens.join( " " ); - } - - -function getClass( elem ) { - return elem.getAttribute && elem.getAttribute( "class" ) || ""; -} - -function classesToArray( value ) { - if ( Array.isArray( value ) ) { - return value; - } - if ( typeof value === "string" ) { - return value.match( rnothtmlwhite ) || []; - } - return []; -} - -jQuery.fn.extend( { - addClass: function( value ) { - var classes, elem, cur, curValue, clazz, j, finalValue, - i = 0; - - if ( isFunction( value ) ) { - return this.each( function( j ) { - jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); - } ); - } - - classes = classesToArray( value ); - - if ( classes.length ) { - while ( ( elem = this[ i++ ] ) ) { - curValue = getClass( elem ); - cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); - - if ( cur ) { - j = 0; - while ( ( clazz = classes[ j++ ] ) ) { - if ( cur.indexOf( " " + clazz + " " ) < 0 ) { - cur += clazz + " "; - } - } - - // Only assign if different to avoid unneeded rendering. - finalValue = stripAndCollapse( cur ); - if ( curValue !== finalValue ) { - elem.setAttribute( "class", finalValue ); - } - } - } - } - - return this; - }, - - removeClass: function( value ) { - var classes, elem, cur, curValue, clazz, j, finalValue, - i = 0; - - if ( isFunction( value ) ) { - return this.each( function( j ) { - jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); - } ); - } - - if ( !arguments.length ) { - return this.attr( "class", "" ); - } - - classes = classesToArray( value ); - - if ( classes.length ) { - while ( ( elem = this[ i++ ] ) ) { - curValue = getClass( elem ); - - // This expression is here for better compressibility (see addClass) - cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); - - if ( cur ) { - j = 0; - while ( ( clazz = classes[ j++ ] ) ) { - - // Remove *all* instances - while ( cur.indexOf( " " + clazz + " " ) > -1 ) { - cur = cur.replace( " " + clazz + " ", " " ); - } - } - - // Only assign if different to avoid unneeded rendering. - finalValue = stripAndCollapse( cur ); - if ( curValue !== finalValue ) { - elem.setAttribute( "class", finalValue ); - } - } - } - } - - return this; - }, - - toggleClass: function( value, stateVal ) { - var type = typeof value, - isValidValue = type === "string" || Array.isArray( value ); - - if ( typeof stateVal === "boolean" && isValidValue ) { - return stateVal ? 
this.addClass( value ) : this.removeClass( value ); - } - - if ( isFunction( value ) ) { - return this.each( function( i ) { - jQuery( this ).toggleClass( - value.call( this, i, getClass( this ), stateVal ), - stateVal - ); - } ); - } - - return this.each( function() { - var className, i, self, classNames; - - if ( isValidValue ) { - - // Toggle individual class names - i = 0; - self = jQuery( this ); - classNames = classesToArray( value ); - - while ( ( className = classNames[ i++ ] ) ) { - - // Check each className given, space separated list - if ( self.hasClass( className ) ) { - self.removeClass( className ); - } else { - self.addClass( className ); - } - } - - // Toggle whole class name - } else if ( value === undefined || type === "boolean" ) { - className = getClass( this ); - if ( className ) { - - // Store className if set - dataPriv.set( this, "__className__", className ); - } - - // If the element has a class name or if we're passed `false`, - // then remove the whole classname (if there was one, the above saved it). - // Otherwise bring back whatever was previously saved (if anything), - // falling back to the empty string if nothing was stored. - if ( this.setAttribute ) { - this.setAttribute( "class", - className || value === false ? - "" : - dataPriv.get( this, "__className__" ) || "" - ); - } - } - } ); - }, - - hasClass: function( selector ) { - var className, elem, - i = 0; - - className = " " + selector + " "; - while ( ( elem = this[ i++ ] ) ) { - if ( elem.nodeType === 1 && - ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { - return true; - } - } - - return false; - } -} ); - - - - -var rreturn = /\r/g; - -jQuery.fn.extend( { - val: function( value ) { - var hooks, ret, valueIsFunction, - elem = this[ 0 ]; - - if ( !arguments.length ) { - if ( elem ) { - hooks = jQuery.valHooks[ elem.type ] || - jQuery.valHooks[ elem.nodeName.toLowerCase() ]; - - if ( hooks && - "get" in hooks && - ( ret = hooks.get( elem, "value" ) ) !== undefined - ) { - return ret; - } - - ret = elem.value; - - // Handle most common string cases - if ( typeof ret === "string" ) { - return ret.replace( rreturn, "" ); - } - - // Handle cases where value is null/undef or number - return ret == null ? "" : ret; - } - - return; - } - - valueIsFunction = isFunction( value ); - - return this.each( function( i ) { - var val; - - if ( this.nodeType !== 1 ) { - return; - } - - if ( valueIsFunction ) { - val = value.call( this, i, jQuery( this ).val() ); - } else { - val = value; - } - - // Treat null/undefined as ""; convert numbers to string - if ( val == null ) { - val = ""; - - } else if ( typeof val === "number" ) { - val += ""; - - } else if ( Array.isArray( val ) ) { - val = jQuery.map( val, function( value ) { - return value == null ? "" : value + ""; - } ); - } - - hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; - - // If set returns undefined, fall back to normal setting - if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { - this.value = val; - } - } ); - } -} ); - -jQuery.extend( { - valHooks: { - option: { - get: function( elem ) { - - var val = jQuery.find.attr( elem, "value" ); - return val != null ? 
- val : - - // Support: IE <=10 - 11 only - // option.text throws exceptions (#14686, #14858) - // Strip and collapse whitespace - // https://html.spec.whatwg.org/#strip-and-collapse-whitespace - stripAndCollapse( jQuery.text( elem ) ); - } - }, - select: { - get: function( elem ) { - var value, option, i, - options = elem.options, - index = elem.selectedIndex, - one = elem.type === "select-one", - values = one ? null : [], - max = one ? index + 1 : options.length; - - if ( index < 0 ) { - i = max; - - } else { - i = one ? index : 0; - } - - // Loop through all the selected options - for ( ; i < max; i++ ) { - option = options[ i ]; - - // Support: IE <=9 only - // IE8-9 doesn't update selected after form reset (#2551) - if ( ( option.selected || i === index ) && - - // Don't return options that are disabled or in a disabled optgroup - !option.disabled && - ( !option.parentNode.disabled || - !nodeName( option.parentNode, "optgroup" ) ) ) { - - // Get the specific value for the option - value = jQuery( option ).val(); - - // We don't need an array for one selects - if ( one ) { - return value; - } - - // Multi-Selects return an array - values.push( value ); - } - } - - return values; - }, - - set: function( elem, value ) { - var optionSet, option, - options = elem.options, - values = jQuery.makeArray( value ), - i = options.length; - - while ( i-- ) { - option = options[ i ]; - - /* eslint-disable no-cond-assign */ - - if ( option.selected = - jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 - ) { - optionSet = true; - } - - /* eslint-enable no-cond-assign */ - } - - // Force browsers to behave consistently when non-matching value is set - if ( !optionSet ) { - elem.selectedIndex = -1; - } - return values; - } - } - } -} ); - -// Radios and checkboxes getter/setter -jQuery.each( [ "radio", "checkbox" ], function() { - jQuery.valHooks[ this ] = { - set: function( elem, value ) { - if ( Array.isArray( value ) ) { - return ( elem.checked = jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); - } - } - }; - if ( !support.checkOn ) { - jQuery.valHooks[ this ].get = function( elem ) { - return elem.getAttribute( "value" ) === null ? "on" : elem.value; - }; - } -} ); - - - - -// Return jQuery for attributes-only inclusion - - -support.focusin = "onfocusin" in window; - - -var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/, - stopPropagationCallback = function( e ) { - e.stopPropagation(); - }; - -jQuery.extend( jQuery.event, { - - trigger: function( event, data, elem, onlyHandlers ) { - - var i, cur, tmp, bubbleType, ontype, handle, special, lastElement, - eventPath = [ elem || document ], - type = hasOwn.call( event, "type" ) ? event.type : event, - namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; - - cur = lastElement = tmp = elem = elem || document; - - // Don't do events on text and comment nodes - if ( elem.nodeType === 3 || elem.nodeType === 8 ) { - return; - } - - // focus/blur morphs to focusin/out; ensure we're not firing them right now - if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { - return; - } - - if ( type.indexOf( "." ) > -1 ) { - - // Namespaced trigger; create a regexp to match event type in handle() - namespaces = type.split( "." ); - type = namespaces.shift(); - namespaces.sort(); - } - ontype = type.indexOf( ":" ) < 0 && "on" + type; - - // Caller can pass in a jQuery.Event object, Object, or just an event type string - event = event[ jQuery.expando ] ? 
- event : - new jQuery.Event( type, typeof event === "object" && event ); - - // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) - event.isTrigger = onlyHandlers ? 2 : 3; - event.namespace = namespaces.join( "." ); - event.rnamespace = event.namespace ? - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : - null; - - // Clean up the event in case it is being reused - event.result = undefined; - if ( !event.target ) { - event.target = elem; - } - - // Clone any incoming data and prepend the event, creating the handler arg list - data = data == null ? - [ event ] : - jQuery.makeArray( data, [ event ] ); - - // Allow special events to draw outside the lines - special = jQuery.event.special[ type ] || {}; - if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { - return; - } - - // Determine event propagation path in advance, per W3C events spec (#9951) - // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) - if ( !onlyHandlers && !special.noBubble && !isWindow( elem ) ) { - - bubbleType = special.delegateType || type; - if ( !rfocusMorph.test( bubbleType + type ) ) { - cur = cur.parentNode; - } - for ( ; cur; cur = cur.parentNode ) { - eventPath.push( cur ); - tmp = cur; - } - - // Only add window if we got to document (e.g., not plain obj or detached DOM) - if ( tmp === ( elem.ownerDocument || document ) ) { - eventPath.push( tmp.defaultView || tmp.parentWindow || window ); - } - } - - // Fire handlers on the event path - i = 0; - while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { - lastElement = cur; - event.type = i > 1 ? - bubbleType : - special.bindType || type; - - // jQuery handler - handle = ( dataPriv.get( cur, "events" ) || {} )[ event.type ] && - dataPriv.get( cur, "handle" ); - if ( handle ) { - handle.apply( cur, data ); - } - - // Native handler - handle = ontype && cur[ ontype ]; - if ( handle && handle.apply && acceptData( cur ) ) { - event.result = handle.apply( cur, data ); - if ( event.result === false ) { - event.preventDefault(); - } - } - } - event.type = type; - - // If nobody prevented the default action, do it now - if ( !onlyHandlers && !event.isDefaultPrevented() ) { - - if ( ( !special._default || - special._default.apply( eventPath.pop(), data ) === false ) && - acceptData( elem ) ) { - - // Call a native DOM method on the target with the same name as the event. 
- // Don't do default actions on window, that's where global variables be (#6170) - if ( ontype && isFunction( elem[ type ] ) && !isWindow( elem ) ) { - - // Don't re-trigger an onFOO event when we call its FOO() method - tmp = elem[ ontype ]; - - if ( tmp ) { - elem[ ontype ] = null; - } - - // Prevent re-triggering of the same event, since we already bubbled it above - jQuery.event.triggered = type; - - if ( event.isPropagationStopped() ) { - lastElement.addEventListener( type, stopPropagationCallback ); - } - - elem[ type ](); - - if ( event.isPropagationStopped() ) { - lastElement.removeEventListener( type, stopPropagationCallback ); - } - - jQuery.event.triggered = undefined; - - if ( tmp ) { - elem[ ontype ] = tmp; - } - } - } - } - - return event.result; - }, - - // Piggyback on a donor event to simulate a different one - // Used only for `focus(in | out)` events - simulate: function( type, elem, event ) { - var e = jQuery.extend( - new jQuery.Event(), - event, - { - type: type, - isSimulated: true - } - ); - - jQuery.event.trigger( e, null, elem ); - } - -} ); - -jQuery.fn.extend( { - - trigger: function( type, data ) { - return this.each( function() { - jQuery.event.trigger( type, data, this ); - } ); - }, - triggerHandler: function( type, data ) { - var elem = this[ 0 ]; - if ( elem ) { - return jQuery.event.trigger( type, data, elem, true ); - } - } -} ); - - -// Support: Firefox <=44 -// Firefox doesn't have focus(in | out) events -// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 -// -// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 -// focus(in | out) events fire after focus & blur events, -// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order -// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 -if ( !support.focusin ) { - jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { - - // Attach a single capturing handler on the document while someone wants focusin/focusout - var handler = function( event ) { - jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); - }; - - jQuery.event.special[ fix ] = { - setup: function() { - var doc = this.ownerDocument || this, - attaches = dataPriv.access( doc, fix ); - - if ( !attaches ) { - doc.addEventListener( orig, handler, true ); - } - dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); - }, - teardown: function() { - var doc = this.ownerDocument || this, - attaches = dataPriv.access( doc, fix ) - 1; - - if ( !attaches ) { - doc.removeEventListener( orig, handler, true ); - dataPriv.remove( doc, fix ); - - } else { - dataPriv.access( doc, fix, attaches ); - } - } - }; - } ); -} -var location = window.location; - -var nonce = Date.now(); - -var rquery = ( /\?/ ); - - - -// Cross-browser xml parsing -jQuery.parseXML = function( data ) { - var xml; - if ( !data || typeof data !== "string" ) { - return null; - } - - // Support: IE 9 - 11 only - // IE throws on parseFromString with invalid input. 
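// An illustrative usage sketch (assumes a browser DOMParser; not upstream
// code): well-formed markup yields an XMLDocument, while malformed input
// falls through the try/catch below and ends in jQuery.error().
//
//     var doc = jQuery.parseXML( "<root><item id='1'/></root>" );
//     doc.documentElement.nodeName;     // "root"
//     // jQuery.parseXML( "<broken" );  // throws "Invalid XML: <broken"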
- try { - xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); - } catch ( e ) { - xml = undefined; - } - - if ( !xml || xml.getElementsByTagName( "parsererror" ).length ) { - jQuery.error( "Invalid XML: " + data ); - } - return xml; -}; - - -var - rbracket = /\[\]$/, - rCRLF = /\r?\n/g, - rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, - rsubmittable = /^(?:input|select|textarea|keygen)/i; - -function buildParams( prefix, obj, traditional, add ) { - var name; - - if ( Array.isArray( obj ) ) { - - // Serialize array item. - jQuery.each( obj, function( i, v ) { - if ( traditional || rbracket.test( prefix ) ) { - - // Treat each array item as a scalar. - add( prefix, v ); - - } else { - - // Item is non-scalar (array or object), encode its numeric index. - buildParams( - prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]", - v, - traditional, - add - ); - } - } ); - - } else if ( !traditional && toType( obj ) === "object" ) { - - // Serialize object item. - for ( name in obj ) { - buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); - } - - } else { - - // Serialize scalar item. - add( prefix, obj ); - } -} - -// Serialize an array of form elements or a set of -// key/values into a query string -jQuery.param = function( a, traditional ) { - var prefix, - s = [], - add = function( key, valueOrFunction ) { - - // If value is a function, invoke it and use its return value - var value = isFunction( valueOrFunction ) ? - valueOrFunction() : - valueOrFunction; - - s[ s.length ] = encodeURIComponent( key ) + "=" + - encodeURIComponent( value == null ? "" : value ); - }; - - if ( a == null ) { - return ""; - } - - // If an array was passed in, assume that it is an array of form elements. - if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { - - // Serialize the form elements - jQuery.each( a, function() { - add( this.name, this.value ); - } ); - - } else { - - // If traditional, encode the "old" way (the way 1.3.2 or older - // did it), otherwise encode params recursively. - for ( prefix in a ) { - buildParams( prefix, a[ prefix ], traditional, add ); - } - } - - // Return the resulting serialization - return s.join( "&" ); -}; - -jQuery.fn.extend( { - serialize: function() { - return jQuery.param( this.serializeArray() ); - }, - serializeArray: function() { - return this.map( function() { - - // Can add propHook for "elements" to filter or add form elements - var elements = jQuery.prop( this, "elements" ); - return elements ? 
jQuery.makeArray( elements ) : this; - } ) - .filter( function() { - var type = this.type; - - // Use .is( ":disabled" ) so that fieldset[disabled] works - return this.name && !jQuery( this ).is( ":disabled" ) && - rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && - ( this.checked || !rcheckableType.test( type ) ); - } ) - .map( function( i, elem ) { - var val = jQuery( this ).val(); - - if ( val == null ) { - return null; - } - - if ( Array.isArray( val ) ) { - return jQuery.map( val, function( val ) { - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ); - } - - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ).get(); - } -} ); - - -var - r20 = /%20/g, - rhash = /#.*$/, - rantiCache = /([?&])_=[^&]*/, - rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, - - // #7653, #8125, #8152: local protocol detection - rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, - rnoContent = /^(?:GET|HEAD)$/, - rprotocol = /^\/\//, - - /* Prefilters - * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) - * 2) These are called: - * - BEFORE asking for a transport - * - AFTER param serialization (s.data is a string if s.processData is true) - * 3) key is the dataType - * 4) the catchall symbol "*" can be used - * 5) execution will start with transport dataType and THEN continue down to "*" if needed - */ - prefilters = {}, - - /* Transports bindings - * 1) key is the dataType - * 2) the catchall symbol "*" can be used - * 3) selection will start with transport dataType and THEN go to "*" if needed - */ - transports = {}, - - // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression - allTypes = "*/".concat( "*" ), - - // Anchor tag for parsing the document origin - originAnchor = document.createElement( "a" ); - originAnchor.href = location.href; - -// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport -function addToPrefiltersOrTransports( structure ) { - - // dataTypeExpression is optional and defaults to "*" - return function( dataTypeExpression, func ) { - - if ( typeof dataTypeExpression !== "string" ) { - func = dataTypeExpression; - dataTypeExpression = "*"; - } - - var dataType, - i = 0, - dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; - - if ( isFunction( func ) ) { - - // For each dataType in the dataTypeExpression - while ( ( dataType = dataTypes[ i++ ] ) ) { - - // Prepend if requested - if ( dataType[ 0 ] === "+" ) { - dataType = dataType.slice( 1 ) || "*"; - ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); - - // Otherwise append - } else { - ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); - } - } - } - }; -} - -// Base inspection function for prefilters and transports -function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { - - var inspected = {}, - seekingTransport = ( structure === transports ); - - function inspect( dataType ) { - var selected; - inspected[ dataType ] = true; - jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { - var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR ); - if ( typeof dataTypeOrTransport === "string" && - !seekingTransport && !inspected[ dataTypeOrTransport ] ) { - - options.dataTypes.unshift( dataTypeOrTransport ); - inspect( dataTypeOrTransport ); - return false; - } else if ( seekingTransport ) { - return !( selected = dataTypeOrTransport ); - } 
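// An illustrative sketch of what this recursion enables (hypothetical
// dataType "mycsv"; not upstream code): a prefilter that returns a string
// redirects inspection to that dataType, as unshifted above.
//
//     jQuery.ajaxPrefilter( "mycsv", function( s ) {
//         s.converters[ "text mycsv" ] = function( text ) {
//             return text.trim().split( "\n" );
//         };
//         return "text"; // hand off to the "text" prefilters/transports
//     } );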
- } ); - return selected; - } - - return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); -} - -// A special extend for ajax options -// that takes "flat" options (not to be deep extended) -// Fixes #9887 -function ajaxExtend( target, src ) { - var key, deep, - flatOptions = jQuery.ajaxSettings.flatOptions || {}; - - for ( key in src ) { - if ( src[ key ] !== undefined ) { - ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; - } - } - if ( deep ) { - jQuery.extend( true, target, deep ); - } - - return target; -} - -/* Handles responses to an ajax request: - * - finds the right dataType (mediates between content-type and expected dataType) - * - returns the corresponding response - */ -function ajaxHandleResponses( s, jqXHR, responses ) { - - var ct, type, finalDataType, firstDataType, - contents = s.contents, - dataTypes = s.dataTypes; - - // Remove auto dataType and get content-type in the process - while ( dataTypes[ 0 ] === "*" ) { - dataTypes.shift(); - if ( ct === undefined ) { - ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); - } - } - - // Check if we're dealing with a known content-type - if ( ct ) { - for ( type in contents ) { - if ( contents[ type ] && contents[ type ].test( ct ) ) { - dataTypes.unshift( type ); - break; - } - } - } - - // Check to see if we have a response for the expected dataType - if ( dataTypes[ 0 ] in responses ) { - finalDataType = dataTypes[ 0 ]; - } else { - - // Try convertible dataTypes - for ( type in responses ) { - if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { - finalDataType = type; - break; - } - if ( !firstDataType ) { - firstDataType = type; - } - } - - // Or just use first one - finalDataType = finalDataType || firstDataType; - } - - // If we found a dataType - // We add the dataType to the list if needed - // and return the corresponding response - if ( finalDataType ) { - if ( finalDataType !== dataTypes[ 0 ] ) { - dataTypes.unshift( finalDataType ); - } - return responses[ finalDataType ]; - } -} - -/* Chain conversions given the request and the original response - * Also sets the responseXXX fields on the jqXHR instance - */ -function ajaxConvert( s, response, jqXHR, isSuccess ) { - var conv2, current, conv, tmp, prev, - converters = {}, - - // Work with a copy of dataTypes in case we need to modify it for conversion - dataTypes = s.dataTypes.slice(); - - // Create converters map with lowercased keys - if ( dataTypes[ 1 ] ) { - for ( conv in s.converters ) { - converters[ conv.toLowerCase() ] = s.converters[ conv ]; - } - } - - current = dataTypes.shift(); - - // Convert to each sequential dataType - while ( current ) { - - if ( s.responseFields[ current ] ) { - jqXHR[ s.responseFields[ current ] ] = response; - } - - // Apply the dataFilter if provided - if ( !prev && isSuccess && s.dataFilter ) { - response = s.dataFilter( response, s.dataType ); - } - - prev = current; - current = dataTypes.shift(); - - if ( current ) { - - // There's only work to do if current dataType is non-auto - if ( current === "*" ) { - - current = prev; - - // Convert response if prev dataType is non-auto and differs from current - } else if ( prev !== "*" && prev !== current ) { - - // Seek a direct converter - conv = converters[ prev + " " + current ] || converters[ "* " + current ]; - - // If none found, seek a pair - if ( !conv ) { - for ( conv2 in converters ) { - - // If conv2 outputs current - tmp = conv2.split( " " ); - if ( tmp[ 1 ] === current ) { - - // If 
prev can be converted to accepted input - conv = converters[ prev + " " + tmp[ 0 ] ] || - converters[ "* " + tmp[ 0 ] ]; - if ( conv ) { - - // Condense equivalence converters - if ( conv === true ) { - conv = converters[ conv2 ]; - - // Otherwise, insert the intermediate dataType - } else if ( converters[ conv2 ] !== true ) { - current = tmp[ 0 ]; - dataTypes.unshift( tmp[ 1 ] ); - } - break; - } - } - } - } - - // Apply converter (if not an equivalence) - if ( conv !== true ) { - - // Unless errors are allowed to bubble, catch and return them - if ( conv && s.throws ) { - response = conv( response ); - } else { - try { - response = conv( response ); - } catch ( e ) { - return { - state: "parsererror", - error: conv ? e : "No conversion from " + prev + " to " + current - }; - } - } - } - } - } - } - - return { state: "success", data: response }; -} - -jQuery.extend( { - - // Counter for holding the number of active queries - active: 0, - - // Last-Modified header cache for next request - lastModified: {}, - etag: {}, - - ajaxSettings: { - url: location.href, - type: "GET", - isLocal: rlocalProtocol.test( location.protocol ), - global: true, - processData: true, - async: true, - contentType: "application/x-www-form-urlencoded; charset=UTF-8", - - /* - timeout: 0, - data: null, - dataType: null, - username: null, - password: null, - cache: null, - throws: false, - traditional: false, - headers: {}, - */ - - accepts: { - "*": allTypes, - text: "text/plain", - html: "text/html", - xml: "application/xml, text/xml", - json: "application/json, text/javascript" - }, - - contents: { - xml: /\bxml\b/, - html: /\bhtml/, - json: /\bjson\b/ - }, - - responseFields: { - xml: "responseXML", - text: "responseText", - json: "responseJSON" - }, - - // Data converters - // Keys separate source (or catchall "*") and destination types with a single space - converters: { - - // Convert anything to text - "* text": String, - - // Text to html (true = no transformation) - "text html": true, - - // Evaluate text as a json expression - "text json": JSON.parse, - - // Parse text as xml - "text xml": jQuery.parseXML - }, - - // For options that shouldn't be deep extended: - // you can add your own custom options here if - // and when you create one that shouldn't be - // deep extended (see ajaxExtend) - flatOptions: { - url: true, - context: true - } - }, - - // Creates a full fledged settings object into target - // with both ajaxSettings and settings fields. - // If target is omitted, writes into ajaxSettings. - ajaxSetup: function( target, settings ) { - return settings ? 
- - // Building a settings object - ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : - - // Extending ajaxSettings - ajaxExtend( jQuery.ajaxSettings, target ); - }, - - ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), - ajaxTransport: addToPrefiltersOrTransports( transports ), - - // Main method - ajax: function( url, options ) { - - // If url is an object, simulate pre-1.5 signature - if ( typeof url === "object" ) { - options = url; - url = undefined; - } - - // Force options to be an object - options = options || {}; - - var transport, - - // URL without anti-cache param - cacheURL, - - // Response headers - responseHeadersString, - responseHeaders, - - // timeout handle - timeoutTimer, - - // Url cleanup var - urlAnchor, - - // Request state (becomes false upon send and true upon completion) - completed, - - // To know if global events are to be dispatched - fireGlobals, - - // Loop variable - i, - - // uncached part of the url - uncached, - - // Create the final options object - s = jQuery.ajaxSetup( {}, options ), - - // Callbacks context - callbackContext = s.context || s, - - // Context for global events is callbackContext if it is a DOM node or jQuery collection - globalEventContext = s.context && - ( callbackContext.nodeType || callbackContext.jquery ) ? - jQuery( callbackContext ) : - jQuery.event, - - // Deferreds - deferred = jQuery.Deferred(), - completeDeferred = jQuery.Callbacks( "once memory" ), - - // Status-dependent callbacks - statusCode = s.statusCode || {}, - - // Headers (they are sent all at once) - requestHeaders = {}, - requestHeadersNames = {}, - - // Default abort message - strAbort = "canceled", - - // Fake xhr - jqXHR = { - readyState: 0, - - // Builds headers hashtable if needed - getResponseHeader: function( key ) { - var match; - if ( completed ) { - if ( !responseHeaders ) { - responseHeaders = {}; - while ( ( match = rheaders.exec( responseHeadersString ) ) ) { - responseHeaders[ match[ 1 ].toLowerCase() + " " ] = - ( responseHeaders[ match[ 1 ].toLowerCase() + " " ] || [] ) - .concat( match[ 2 ] ); - } - } - match = responseHeaders[ key.toLowerCase() + " " ]; - } - return match == null ? null : match.join( ", " ); - }, - - // Raw string - getAllResponseHeaders: function() { - return completed ? 
responseHeadersString : null; - }, - - // Caches the header - setRequestHeader: function( name, value ) { - if ( completed == null ) { - name = requestHeadersNames[ name.toLowerCase() ] = - requestHeadersNames[ name.toLowerCase() ] || name; - requestHeaders[ name ] = value; - } - return this; - }, - - // Overrides response content-type header - overrideMimeType: function( type ) { - if ( completed == null ) { - s.mimeType = type; - } - return this; - }, - - // Status-dependent callbacks - statusCode: function( map ) { - var code; - if ( map ) { - if ( completed ) { - - // Execute the appropriate callbacks - jqXHR.always( map[ jqXHR.status ] ); - } else { - - // Lazy-add the new callbacks in a way that preserves old ones - for ( code in map ) { - statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; - } - } - } - return this; - }, - - // Cancel the request - abort: function( statusText ) { - var finalText = statusText || strAbort; - if ( transport ) { - transport.abort( finalText ); - } - done( 0, finalText ); - return this; - } - }; - - // Attach deferreds - deferred.promise( jqXHR ); - - // Add protocol if not provided (prefilters might expect it) - // Handle falsy url in the settings object (#10093: consistency with old signature) - // We also use the url parameter if available - s.url = ( ( url || s.url || location.href ) + "" ) - .replace( rprotocol, location.protocol + "//" ); - - // Alias method option to type as per ticket #12004 - s.type = options.method || options.type || s.method || s.type; - - // Extract dataTypes list - s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; - - // A cross-domain request is in order when the origin doesn't match the current origin. - if ( s.crossDomain == null ) { - urlAnchor = document.createElement( "a" ); - - // Support: IE <=8 - 11, Edge 12 - 15 - // IE throws exception on accessing the href property if url is malformed, - // e.g. 
http://example.com:80x/ - try { - urlAnchor.href = s.url; - - // Support: IE <=8 - 11 only - // Anchor's host property isn't correctly set when s.url is relative - urlAnchor.href = urlAnchor.href; - s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== - urlAnchor.protocol + "//" + urlAnchor.host; - } catch ( e ) { - - // If there is an error parsing the URL, assume it is crossDomain, - // it can be rejected by the transport if it is invalid - s.crossDomain = true; - } - } - - // Convert data if not already a string - if ( s.data && s.processData && typeof s.data !== "string" ) { - s.data = jQuery.param( s.data, s.traditional ); - } - - // Apply prefilters - inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); - - // If request was aborted inside a prefilter, stop there - if ( completed ) { - return jqXHR; - } - - // We can fire global events as of now if asked to - // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) - fireGlobals = jQuery.event && s.global; - - // Watch for a new set of requests - if ( fireGlobals && jQuery.active++ === 0 ) { - jQuery.event.trigger( "ajaxStart" ); - } - - // Uppercase the type - s.type = s.type.toUpperCase(); - - // Determine if request has content - s.hasContent = !rnoContent.test( s.type ); - - // Save the URL in case we're toying with the If-Modified-Since - // and/or If-None-Match header later on - // Remove hash to simplify url manipulation - cacheURL = s.url.replace( rhash, "" ); - - // More options handling for requests with no content - if ( !s.hasContent ) { - - // Remember the hash so we can put it back - uncached = s.url.slice( cacheURL.length ); - - // If data is available and should be processed, append data to url - if ( s.data && ( s.processData || typeof s.data === "string" ) ) { - cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; - - // #9682: remove data so that it's not used in an eventual retry - delete s.data; - } - - // Add or update anti-cache param if needed - if ( s.cache === false ) { - cacheURL = cacheURL.replace( rantiCache, "$1" ); - uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce++ ) + uncached; - } - - // Put hash and anti-cache on the URL that will be requested (gh-1732) - s.url = cacheURL + uncached; - - // Change '%20' to '+' if this is encoded form body content (gh-2658) - } else if ( s.data && s.processData && - ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { - s.data = s.data.replace( r20, "+" ); - } - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. - if ( s.ifModified ) { - if ( jQuery.lastModified[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); - } - if ( jQuery.etag[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); - } - } - - // Set the correct header, if data is being sent - if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { - jqXHR.setRequestHeader( "Content-Type", s.contentType ); - } - - // Set the Accepts header for the server, depending on the dataType - jqXHR.setRequestHeader( - "Accept", - s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? - s.accepts[ s.dataTypes[ 0 ] ] + - ( s.dataTypes[ 0 ] !== "*" ? 
", " + allTypes + "; q=0.01" : "" ) : - s.accepts[ "*" ] - ); - - // Check for headers option - for ( i in s.headers ) { - jqXHR.setRequestHeader( i, s.headers[ i ] ); - } - - // Allow custom headers/mimetypes and early abort - if ( s.beforeSend && - ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { - - // Abort if not done already and return - return jqXHR.abort(); - } - - // Aborting is no longer a cancellation - strAbort = "abort"; - - // Install callbacks on deferreds - completeDeferred.add( s.complete ); - jqXHR.done( s.success ); - jqXHR.fail( s.error ); - - // Get transport - transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); - - // If no transport, we auto-abort - if ( !transport ) { - done( -1, "No Transport" ); - } else { - jqXHR.readyState = 1; - - // Send global event - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); - } - - // If request was aborted inside ajaxSend, stop there - if ( completed ) { - return jqXHR; - } - - // Timeout - if ( s.async && s.timeout > 0 ) { - timeoutTimer = window.setTimeout( function() { - jqXHR.abort( "timeout" ); - }, s.timeout ); - } - - try { - completed = false; - transport.send( requestHeaders, done ); - } catch ( e ) { - - // Rethrow post-completion exceptions - if ( completed ) { - throw e; - } - - // Propagate others as results - done( -1, e ); - } - } - - // Callback for when everything is done - function done( status, nativeStatusText, responses, headers ) { - var isSuccess, success, error, response, modified, - statusText = nativeStatusText; - - // Ignore repeat invocations - if ( completed ) { - return; - } - - completed = true; - - // Clear timeout if it exists - if ( timeoutTimer ) { - window.clearTimeout( timeoutTimer ); - } - - // Dereference transport for early garbage collection - // (no matter how long the jqXHR object will be used) - transport = undefined; - - // Cache response headers - responseHeadersString = headers || ""; - - // Set readyState - jqXHR.readyState = status > 0 ? 4 : 0; - - // Determine if successful - isSuccess = status >= 200 && status < 300 || status === 304; - - // Get response data - if ( responses ) { - response = ajaxHandleResponses( s, jqXHR, responses ); - } - - // Convert no matter what (that way responseXXX fields are always set) - response = ajaxConvert( s, response, jqXHR, isSuccess ); - - // If successful, handle type chaining - if ( isSuccess ) { - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
- if ( s.ifModified ) { - modified = jqXHR.getResponseHeader( "Last-Modified" ); - if ( modified ) { - jQuery.lastModified[ cacheURL ] = modified; - } - modified = jqXHR.getResponseHeader( "etag" ); - if ( modified ) { - jQuery.etag[ cacheURL ] = modified; - } - } - - // if no content - if ( status === 204 || s.type === "HEAD" ) { - statusText = "nocontent"; - - // if not modified - } else if ( status === 304 ) { - statusText = "notmodified"; - - // If we have data, let's convert it - } else { - statusText = response.state; - success = response.data; - error = response.error; - isSuccess = !error; - } - } else { - - // Extract error from statusText and normalize for non-aborts - error = statusText; - if ( status || !statusText ) { - statusText = "error"; - if ( status < 0 ) { - status = 0; - } - } - } - - // Set data for the fake xhr object - jqXHR.status = status; - jqXHR.statusText = ( nativeStatusText || statusText ) + ""; - - // Success/Error - if ( isSuccess ) { - deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); - } else { - deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); - } - - // Status-dependent callbacks - jqXHR.statusCode( statusCode ); - statusCode = undefined; - - if ( fireGlobals ) { - globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", - [ jqXHR, s, isSuccess ? success : error ] ); - } - - // Complete - completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); - - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); - - // Handle the global AJAX counter - if ( !( --jQuery.active ) ) { - jQuery.event.trigger( "ajaxStop" ); - } - } - } - - return jqXHR; - }, - - getJSON: function( url, data, callback ) { - return jQuery.get( url, data, callback, "json" ); - }, - - getScript: function( url, callback ) { - return jQuery.get( url, undefined, callback, "script" ); - } -} ); - -jQuery.each( [ "get", "post" ], function( i, method ) { - jQuery[ method ] = function( url, data, callback, type ) { - - // Shift arguments if data argument was omitted - if ( isFunction( data ) ) { - type = type || callback; - callback = data; - data = undefined; - } - - // The url can be an options object (which then must have .url) - return jQuery.ajax( jQuery.extend( { - url: url, - type: method, - dataType: type, - data: data, - success: callback - }, jQuery.isPlainObject( url ) && url ) ); - }; -} ); - - -jQuery._evalUrl = function( url, options ) { - return jQuery.ajax( { - url: url, - - // Make this explicit, since user can override this through ajaxSetup (#11264) - type: "GET", - dataType: "script", - cache: true, - async: false, - global: false, - - // Only evaluate the response if it is successful (gh-4126) - // dataFilter is not invoked for failure responses, so using it instead - // of the default converter is kludgy but it works. 
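// An illustrative walk-through of the kludge described above (assumed
// flow for the internal helper; not upstream code): for a successful
// jQuery._evalUrl( "/assets/setup.js" ),
//
//     // 1. the script is fetched synchronously as text;
//     // 2. dataFilter runs jQuery.globalEval on the response;
//     // 3. the no-op "text script" converter below keeps the default
//     //    converter from evaluating the same response a second time.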
- converters: { - "text script": function() {} - }, - dataFilter: function( response ) { - jQuery.globalEval( response, options ); - } - } ); -}; - - -jQuery.fn.extend( { - wrapAll: function( html ) { - var wrap; - - if ( this[ 0 ] ) { - if ( isFunction( html ) ) { - html = html.call( this[ 0 ] ); - } - - // The elements to wrap the target around - wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); - - if ( this[ 0 ].parentNode ) { - wrap.insertBefore( this[ 0 ] ); - } - - wrap.map( function() { - var elem = this; - - while ( elem.firstElementChild ) { - elem = elem.firstElementChild; - } - - return elem; - } ).append( this ); - } - - return this; - }, - - wrapInner: function( html ) { - if ( isFunction( html ) ) { - return this.each( function( i ) { - jQuery( this ).wrapInner( html.call( this, i ) ); - } ); - } - - return this.each( function() { - var self = jQuery( this ), - contents = self.contents(); - - if ( contents.length ) { - contents.wrapAll( html ); - - } else { - self.append( html ); - } - } ); - }, - - wrap: function( html ) { - var htmlIsFunction = isFunction( html ); - - return this.each( function( i ) { - jQuery( this ).wrapAll( htmlIsFunction ? html.call( this, i ) : html ); - } ); - }, - - unwrap: function( selector ) { - this.parent( selector ).not( "body" ).each( function() { - jQuery( this ).replaceWith( this.childNodes ); - } ); - return this; - } -} ); - - -jQuery.expr.pseudos.hidden = function( elem ) { - return !jQuery.expr.pseudos.visible( elem ); -}; -jQuery.expr.pseudos.visible = function( elem ) { - return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); -}; - - - - -jQuery.ajaxSettings.xhr = function() { - try { - return new window.XMLHttpRequest(); - } catch ( e ) {} -}; - -var xhrSuccessStatus = { - - // File protocol always yields status code 0, assume 200 - 0: 200, - - // Support: IE <=9 only - // #1450: sometimes IE returns 1223 when it should be 204 - 1223: 204 - }, - xhrSupported = jQuery.ajaxSettings.xhr(); - -support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); -support.ajax = xhrSupported = !!xhrSupported; - -jQuery.ajaxTransport( function( options ) { - var callback, errorCallback; - - // Cross domain only allowed if supported through XMLHttpRequest - if ( support.cors || xhrSupported && !options.crossDomain ) { - return { - send: function( headers, complete ) { - var i, - xhr = options.xhr(); - - xhr.open( - options.type, - options.url, - options.async, - options.username, - options.password - ); - - // Apply custom fields if provided - if ( options.xhrFields ) { - for ( i in options.xhrFields ) { - xhr[ i ] = options.xhrFields[ i ]; - } - } - - // Override mime type if needed - if ( options.mimeType && xhr.overrideMimeType ) { - xhr.overrideMimeType( options.mimeType ); - } - - // X-Requested-With header - // For cross-domain requests, seeing as conditions for a preflight are - // akin to a jigsaw puzzle, we simply never set it to be sure. - // (it can always be set on a per-request basis or even using ajaxSetup) - // For same-domain requests, won't change header if already provided. 
- if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { - headers[ "X-Requested-With" ] = "XMLHttpRequest"; - } - - // Set headers - for ( i in headers ) { - xhr.setRequestHeader( i, headers[ i ] ); - } - - // Callback - callback = function( type ) { - return function() { - if ( callback ) { - callback = errorCallback = xhr.onload = - xhr.onerror = xhr.onabort = xhr.ontimeout = - xhr.onreadystatechange = null; - - if ( type === "abort" ) { - xhr.abort(); - } else if ( type === "error" ) { - - // Support: IE <=9 only - // On a manual native abort, IE9 throws - // errors on any property access that is not readyState - if ( typeof xhr.status !== "number" ) { - complete( 0, "error" ); - } else { - complete( - - // File: protocol always yields status 0; see #8605, #14207 - xhr.status, - xhr.statusText - ); - } - } else { - complete( - xhrSuccessStatus[ xhr.status ] || xhr.status, - xhr.statusText, - - // Support: IE <=9 only - // IE9 has no XHR2 but throws on binary (trac-11426) - // For XHR2 non-text, let the caller handle it (gh-2498) - ( xhr.responseType || "text" ) !== "text" || - typeof xhr.responseText !== "string" ? - { binary: xhr.response } : - { text: xhr.responseText }, - xhr.getAllResponseHeaders() - ); - } - } - }; - }; - - // Listen to events - xhr.onload = callback(); - errorCallback = xhr.onerror = xhr.ontimeout = callback( "error" ); - - // Support: IE 9 only - // Use onreadystatechange to replace onabort - // to handle uncaught aborts - if ( xhr.onabort !== undefined ) { - xhr.onabort = errorCallback; - } else { - xhr.onreadystatechange = function() { - - // Check readyState before timeout as it changes - if ( xhr.readyState === 4 ) { - - // Allow onerror to be called first, - // but that will not handle a native abort - // Also, save errorCallback to a variable - // as xhr.onerror cannot be accessed - window.setTimeout( function() { - if ( callback ) { - errorCallback(); - } - } ); - } - }; - } - - // Create the abort callback - callback = callback( "abort" ); - - try { - - // Do send the request (this may raise an exception) - xhr.send( options.hasContent && options.data || null ); - } catch ( e ) { - - // #14683: Only rethrow if this hasn't been notified as an error yet - if ( callback ) { - throw e; - } - } - }, - - abort: function() { - if ( callback ) { - callback(); - } - } - }; - } -} ); - - - - -// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) -jQuery.ajaxPrefilter( function( s ) { - if ( s.crossDomain ) { - s.contents.script = false; - } -} ); - -// Install script dataType -jQuery.ajaxSetup( { - accepts: { - script: "text/javascript, application/javascript, " + - "application/ecmascript, application/x-ecmascript" - }, - contents: { - script: /\b(?:java|ecma)script\b/ - }, - converters: { - "text script": function( text ) { - jQuery.globalEval( text ); - return text; - } - } -} ); - -// Handle cache's special case and crossDomain -jQuery.ajaxPrefilter( "script", function( s ) { - if ( s.cache === undefined ) { - s.cache = false; - } - if ( s.crossDomain ) { - s.type = "GET"; - } -} ); - -// Bind script tag hack transport -jQuery.ajaxTransport( "script", function( s ) { - - // This transport only deals with cross domain or forced-by-attrs requests - if ( s.crossDomain || s.scriptAttrs ) { - var script, callback; - return { - send: function( _, complete ) { - script = jQuery( " - - - - - - - - - - - - - - - -
The full Pygments API

This page describes the Pygments API.

High-level API

Functions from the pygments module:

pygments.lex(code, lexer)
    Lex code with the lexer (must be a Lexer instance) and return an
    iterable of tokens. Currently, this only calls lexer.get_tokens().

pygments.format(tokens, formatter, outfile=None)
    Format a token stream (iterable of tokens) tokens with the formatter
    (must be a Formatter instance). The result is written to outfile, or,
    if that is None, returned as a string.

pygments.highlight(code, lexer, formatter, outfile=None)
    This is the most high-level highlighting function. It combines lex and
    format in one function.
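For instance, the three functions compose as follows; a minimal sketch
using the stock Python lexer and HTML formatter:

    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import HtmlFormatter

    code = 'print("Hello, world!")'

    # outfile is None, so the highlighted markup is returned as a string.
    html = highlight(code, PythonLexer(), HtmlFormatter())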

Functions from pygments.lexers:

pygments.lexers.get_lexer_by_name(alias, **options)
    Return an instance of a Lexer subclass that has alias in its aliases
    list. The lexer is given the options at its instantiation.

    Will raise pygments.util.ClassNotFound if no lexer with that alias is
    found.
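For example (a short sketch; "python" is a standard alias of the stock
Python lexer and stripall is a regular Lexer option):

    from pygments.lexers import get_lexer_by_name

    # The option is forwarded to the lexer's constructor.
    lexer = get_lexer_by_name("python", stripall=True)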
pygments.lexers.get_lexer_for_filename(fn, **options)
    Return a Lexer subclass instance that has a filename pattern matching
    fn. The lexer is given the options at its instantiation.

    Will raise pygments.util.ClassNotFound if no lexer for that filename
    is found.

pygments.lexers.get_lexer_for_mimetype(mime, **options)
    Return a Lexer subclass instance that has mime in its mimetype list.
    The lexer is given the options at its instantiation.

    Will raise pygments.util.ClassNotFound if no lexer for that mimetype
    is found.

pygments.lexers.load_lexer_from_file(filename, lexername="CustomLexer", **options)
    Return a Lexer subclass instance loaded from the provided file,
    relative to the current directory. The file is expected to contain a
    Lexer class named lexername (by default, CustomLexer). Users should be
    very careful with the input, because this method is equivalent to
    running eval on the input file. The lexer is given the options at its
    instantiation.

    ClassNotFound is raised if there are any errors loading the Lexer.

    New in version 2.2.

pygments.lexers.guess_lexer(text, **options)
    Return a Lexer subclass instance that's guessed from the text in
    text. For that, the analyse_text() method of every known lexer class
    is called with the text as argument, and the lexer which returned the
    highest value will be instantiated and returned.

    pygments.util.ClassNotFound is raised if no lexer thinks it can
    handle the content.

pygments.lexers.guess_lexer_for_filename(filename, text, **options)
    As guess_lexer(), but only lexers which have a pattern in filenames
    or alias_filenames that matches filename are taken into consideration.

    pygments.util.ClassNotFound is raised if no lexer thinks it can
    handle the content.
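A lookup chain that falls back to content-based guessing might look like
the sketch below; the fallback policy and the pick_lexer helper are the
caller's choice, not something Pygments prescribes:

    from pygments.lexers import get_lexer_for_filename, guess_lexer
    from pygments.util import ClassNotFound

    def pick_lexer(filename, text):
        try:
            return get_lexer_for_filename(filename)
        except ClassNotFound:
            # No filename pattern matched; fall back to analyse_text scoring.
            return guess_lexer(text)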
pygments.lexers.get_all_lexers()
    Return an iterable over all registered lexers, yielding tuples in the
    format:

        (longname, tuple of aliases, tuple of filename patterns, tuple of mimetypes)

    New in version 0.6.
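For instance, to list every registered lexer that claims a given file
extension (a small sketch):

    from pygments.lexers import get_all_lexers

    for longname, aliases, patterns, mimetypes in get_all_lexers():
        if "*.py" in patterns:
            print(longname, aliases)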
pygments.lexers.find_lexer_class_by_name(alias)
    Return the Lexer subclass that has alias in its aliases list, without
    instantiating it.

    Will raise pygments.util.ClassNotFound if no lexer with that alias is
    found.

    New in version 2.2.

pygments.lexers.find_lexer_class(name)
    Return the Lexer subclass whose name attribute equals the given name
    argument.

Functions from pygments.formatters:

pygments.formatters.get_formatter_by_name(alias, **options)
    Return an instance of a Formatter subclass that has alias in its
    aliases list. The formatter is given the options at its instantiation.

    Will raise pygments.util.ClassNotFound if no formatter with that
    alias is found.

pygments.formatters.get_formatter_for_filename(fn, **options)
    Return a Formatter subclass instance that has a filename pattern
    matching fn. The formatter is given the options at its instantiation.

    Will raise pygments.util.ClassNotFound if no formatter for that
    filename is found.
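For example (a sketch; "html" is a standard alias of the stock
HtmlFormatter, and linenos is one of its regular options):

    from pygments.formatters import get_formatter_by_name

    formatter = get_formatter_by_name("html", linenos=True)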
pygments.formatters.load_formatter_from_file(filename, formattername="CustomFormatter", **options)
    Return a Formatter subclass instance loaded from the provided file,
    relative to the current directory. The file is expected to contain a
    Formatter class named formattername (by default, CustomFormatter).
    Users should be very careful with the input, because this method is
    equivalent to running eval on the input file. The formatter is given
    the options at its instantiation.

    ClassNotFound is raised if there are any errors loading the Formatter.

    New in version 2.2.

Functions from pygments.styles:

pygments.styles.get_style_by_name(name)
    Return a style class by its short name. The names of the builtin
    styles are listed in pygments.styles.STYLE_MAP.

    Will raise pygments.util.ClassNotFound if no style of that name is
    found.

pygments.styles.get_all_styles()
    Return an iterable over all registered styles, yielding their names.

    New in version 0.6.
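A typical use is to feed a style into a formatter (a sketch; "monokai" is
one of the builtin styles):

    from pygments.styles import get_style_by_name
    from pygments.formatters import HtmlFormatter

    formatter = HtmlFormatter(style=get_style_by_name("monokai"))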
Lexers

The base lexer class from which all lexers are derived is:

class pygments.lexer.Lexer(**options)
    The constructor takes a **keywords dictionary of options. Every
    subclass must first process its own options and then call the Lexer
    constructor, since it processes the stripnl, stripall and tabsize
    options.

    An example looks like this:

        def __init__(self, **options):
            # Handle our own option first, then let Lexer process the rest.
            self.compress = options.get('compress', '')
            Lexer.__init__(self, **options)

    As these options must all be specifiable as strings (due to the
    command line usage), there are various utility functions available to
    help with that, see Option processing.
    get_tokens(text)
        This method is the basic interface of a lexer. It is called by
        the highlight() function. It must process the text and return an
        iterable of (tokentype, value) pairs from text.

        Normally, you don't need to override this method. The default
        implementation processes the stripnl, stripall and tabsize
        options and then yields all tokens from get_tokens_unprocessed(),
        with the index dropped.

    get_tokens_unprocessed(text)
        This method should process the text and return an iterable of
        (index, tokentype, value) tuples where index is the starting
        position of the token within the input text.

        This method must be overridden by subclasses.

    static analyse_text(text)
        A static method which is called for lexer guessing. It should
        analyse the text and return a float in the range from 0.0 to 1.0.
        If it returns 0.0, the lexer will not be selected as the most
        probable one; if it returns 1.0, it will be selected immediately.

        Note: You don't have to add @staticmethod to the definition of
        this method; this is taken care of by the Lexer's metaclass.

    For a list of known tokens, have a look at the Builtin Tokens page.

    A lexer can also have the following attributes (in fact, all of them
    except alias_filenames are mandatory) that are used by the builtin
    lookup mechanism; a combined sketch follows this list.

    name
        Full name for the lexer, in human-readable form.

    aliases
        A list of short, unique identifiers that can be used to look up
        the lexer from a list, e.g. using get_lexer_by_name().

    filenames
        A list of fnmatch patterns that match filenames which contain
        content for this lexer. The patterns in this list should be
        unique among all lexers.

    alias_filenames
        A list of fnmatch patterns that match filenames which may or may
        not contain content for this lexer. This list is used by the
        guess_lexer_for_filename() function, to determine which lexers
        are then included in guessing the correct one. That means that
        e.g. every lexer for HTML and a template language should include
        *.html in this list.

    mimetypes
        A list of MIME types for content that can be lexed with this
        lexer.
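Putting these pieces together, a minimal custom lexer could look like the
sketch below. The class name, the metadata values and the single token
rule are illustrative assumptions, not part of the API:

    import re

    from pygments.lexer import Lexer
    from pygments.token import Comment, Text

    class NotesLexer(Lexer):
        name = 'Notes'                  # human-readable full name
        aliases = ['notes']             # for get_lexer_by_name('notes')
        filenames = ['*.notes']         # fnmatch patterns for this lexer
        mimetypes = ['text/x-notes']

        def get_tokens_unprocessed(self, text):
            # Yield (index, tokentype, value) tuples, one per line: lines
            # starting with '#' become comments, everything else is text.
            for match in re.finditer(r'.*\n|.+', text):
                token = Comment if match.group().startswith('#') else Text
                yield match.start(), token, match.group()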
Formatters

A formatter is derived from this class:

class pygments.formatter.Formatter(**options)
    As with lexers, this constructor processes options and then must call
    the base class __init__().

    The Formatter class recognizes the options style, full and title. It
    is up to the formatter class whether it uses them.

    get_style_defs(arg='')
        This method must return statements or declarations suitable to
        define the current style for subsequent highlighted text (e.g.
        CSS classes in the HTMLFormatter).

        The optional argument arg can be used to modify the generation
        and is formatter dependent (it is standardized because it can be
        given on the command line).

        This method is called by the -S command-line option; the arg is
        then given by the -a option.

    format(tokensource, outfile)
        This method must format the tokens from the tokensource iterable
        and write the formatted version to the file object outfile.

        Formatter options can control how exactly the tokens are
        converted.
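As a sketch, a bare-bones formatter that discards all styling and writes
only the token values might look like this; the class name and behaviour
are illustrative only:

    from pygments.formatter import Formatter

    class PlainFormatter(Formatter):
        name = 'Plain'
        aliases = ['plain']
        filenames = ['*.txt']

        def get_style_defs(self, arg=''):
            # Nothing to declare: this formatter emits no styling.
            return ''

        def format(self, tokensource, outfile):
            # Drop the token types and write the raw values.
            for tokentype, value in tokensource:
                outfile.write(value)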
    New in version 0.7: A formatter must have the following attributes
    that are used by the builtin lookup mechanism.

    name
        Full name for the formatter, in human-readable form.

    aliases
        A list of short, unique identifiers that can be used to look up
        the formatter from a list, e.g. using get_formatter_by_name().

    filenames
        A list of fnmatch patterns that match filenames for which this
        formatter can produce output. The patterns in this list should be
        unique among all formatters.
Option processing

The pygments.util module has some utility functions usable for option
processing:

exception pygments.util.OptionError
    This exception will be raised by all option processing functions if
    the type or value of the argument is not correct.

pygments.util.get_bool_opt(options, optname, default=None)
    Interpret the key optname from the dictionary options as a boolean
    and return it. Return default if optname is not in options.

    The valid string values for True are 1, yes, true and on, the ones
    for False are 0, no, false and off (matched case-insensitively).

pygments.util.get_int_opt(options, optname, default=None)
    As get_bool_opt(), but interpret the value as an integer.

pygments.util.get_list_opt(options, optname, default=None)
    If the key optname from the dictionary options is a string, split it
    at whitespace and return it. If it is already a list or a tuple, it
    is returned as a list.

pygments.util.get_choice_opt(options, optname, allowed, default=None)
    If the key optname from the dictionary is not in the sequence
    allowed, raise an error, otherwise return it.

    New in version 0.8.
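These helpers are what let lexer options arrive as strings from the
command line. A sketch of how a lexer constructor might use them; the
option names fold and maxdepth are made up for illustration:

    from pygments.lexer import Lexer
    from pygments.util import get_bool_opt, get_int_opt

    class ExampleLexer(Lexer):
        def __init__(self, **options):
            # String values such as 'yes'/'no'/'on'/'off' are accepted.
            self.fold = get_bool_opt(options, 'fold', False)
            self.maxdepth = get_int_opt(options, 'maxdepth', 10)
            Lexer.__init__(self, **options)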
Full contributor list

Pygments is written and maintained by Georg Brandl <georg@python.org>.

Major developers are Tim Hatch <tim@timhatch.com> and Armin Ronacher
<armin.ronacher@active-4.com>.

Other contributors, listed alphabetically, are:
• Sam Aaron – Ioke lexer
• Ali Afshar – image formatter
• Thomas Aglassinger – Easytrieve, JCL, Rexx, Transact-SQL and VBScript lexers
• Muthiah Annamalai – Ezhil lexer
• Kumar Appaiah – Debian control lexer
• Andreas Amann – AppleScript lexer
• Timothy Armstrong – Dart lexer fixes
• Jeffrey Arnold – R/S, Rd, BUGS, Jags, and Stan lexers
• Jeremy Ashkenas – CoffeeScript lexer
• José Joaquín Atria – Praat lexer
• Stefan Matthias Aust – Smalltalk lexer
• Lucas Bajolet – Nit lexer
• Ben Bangert – Mako lexers
• Max Battcher – Darcs patch lexer
• Thomas Baruchel – APL lexer
• Tim Baumann – (Literate) Agda lexer
• Paul Baumgart, 280 North, Inc. – Objective-J lexer
• Michael Bayer – Myghty lexers
• Thomas Beale – Archetype lexers
• John Benediktsson – Factor lexer
• Trevor Bergeron – mIRC formatter
• Vincent Bernat – LessCSS lexer
• Christopher Bertels – Fancy lexer
• Sébastien Bigaret – QVT Operational lexer
• Jarrett Billingsley – MiniD lexer
• Adam Blinkinsop – Haskell, Redcode lexers
• Stéphane Blondon – SGF lexer
• Frits van Bommel – assembler lexers
• Pierre Bourdon – bugfixes
• Matthias Bussonnier – ANSI style handling for terminal-256 formatter
• chebee7i – Python traceback lexer improvements
• Hiram Chirino – Scaml and Jade lexers
• Mauricio Caceres – SAS and Stata lexers
• Ian Cooper – VGL lexer
• David Corbett – Inform, Jasmin, JSGF, Snowball, and TADS 3 lexers
• Leaf Corcoran – MoonScript lexer
• Christopher Creutzig – MuPAD lexer
• Daniël W. Crompton – Pike lexer
• Pete Curry – bugfixes
• Bryan Davis – EBNF lexer
• Bruno Deferrari – Shen lexer
• Giedrius Dubinskas – HTML formatter improvements
• Owen Durni – Haxe lexer
• Alexander Dutton, Oxford University Computing Services – SPARQL lexer
• James Edwards – Terraform lexer
• Nick Efford – Python 3 lexer
• Sven Efftinge – Xtend lexer
• Artem Egorkine – terminal256 formatter
• Matthew Fernandez – CAmkES lexer
• Michael Ficarra – CPSA lexer
• James H. Fisher – PostScript lexer
• William S. Fulton – SWIG lexer
• Carlos Galdino – Elixir and Elixir Console lexers
• Michael Galloy – IDL lexer
• Naveen Garg – Autohotkey lexer
• Simon Garnotel – FreeFem++ lexer
• Laurent Gautier – R/S lexer
• Alex Gaynor – PyPy log lexer
• Richard Gerkin – Igor Pro lexer
• Alain Gilbert – TypeScript lexer
• Alex Gilding – BlitzBasic lexer
• GitHub, Inc – DASM16, Augeas, TOML, and Slash lexers
• Bertrand Goetzmann – Groovy lexer
• Krzysiek Goj – Scala lexer
• Rostyslav Golda – FloScript lexer
• Andrey Golovizin – BibTeX lexers
• Matt Good – Genshi, Cheetah lexers
• Michał Górny – vim modeline support
• Alex Gosse – TrafficScript lexer
• Patrick Gotthardt – PHP namespaces support
• Olivier Guibe – Asymptote lexer
• Phil Hagelberg – Fennel lexer
• Florian Hahn – Boogie lexer
• Martin Harriman – SNOBOL lexer
• Matthew Harrison – SVG formatter
• Steven Hazel – Tcl lexer
• Dan Michael Heggø – Turtle lexer
• Aslak Hellesøy – Gherkin lexer
• Greg Hendershott – Racket lexer
• Justin Hendrick – ParaSail lexer
• Jordi Gutiérrez Hermoso – Octave lexer
• David Hess, Fish Software, Inc. – Objective-J lexer
• Varun Hiremath – Debian control lexer
• Rob Hoelz – Perl 6 lexer
• Doug Hogan – Mscgen lexer
• Ben Hollis – Mason lexer
• Max Horn – GAP lexer
• Alastair Houghton – Lexer inheritance facility
• Tim Howard – BlitzMax lexer
• Dustin Howett – Logos lexer
• Ivan Inozemtsev – Fantom lexer
• Hiroaki Itoh – Shell console rewrite, Lexers for PowerShell session, MSDOS session, BC, WDiff
• Brian R. Jackson – Tea lexer
• Christian Jann – ShellSession lexer
• Dennis Kaarsemaker – sources.list lexer
• Dmitri Kabak – Inferno Limbo lexer
• Igor Kalnitsky – vhdl lexer
• Alexander Kit – MaskJS lexer
• Pekka Klärck – Robot Framework lexer
• Gerwin Klein – Isabelle lexer
• Eric Knibbe – Lasso lexer
• Stepan Koltsov – Clay lexer
• Adam Koprowski – Opa lexer
• Benjamin Kowarsch – Modula-2 lexer
• Domen Kožar – Nix lexer
• Oleh Krekel – Emacs Lisp lexer
• Alexander Kriegisch – Kconfig and AspectJ lexers
• Marek Kubica – Scheme lexer
• Jochen Kupperschmidt – Markdown processor
• Gerd Kurzbach – Modelica lexer
• Jon Larimer, Google Inc. – Smali lexer
• Olov Lassus – Dart lexer
• Matt Layman – TAP lexer
• Kristian Lyngstøl – Varnish lexers
• Sylvestre Ledru – Scilab lexer
• Chee Sing Lee – Flatline lexer
• Mark Lee – Vala lexer
• Valentin Lorentz – C++ lexer improvements
• Ben Mabey – Gherkin lexer
• Angus MacArthur – QML lexer
• Louis Mandel – X10 lexer
• Louis Marchand – Eiffel lexer
• Simone Margaritelli – Hybris lexer
• Kirk McDonald – D lexer
• Gordon McGregor – SystemVerilog lexer
• Stephen McKamey – Duel/JBST lexer
• Brian McKenna – F# lexer
• Charles McLaughlin – Puppet lexer
• Kurt McKee – Tera Term macro lexer
• Lukas Meuser – BBCode formatter, Lua lexer
• Cat Miller – Pig lexer
• Paul Miller – LiveScript lexer
• Hong Minhee – HTTP lexer
• Michael Mior – Awk lexer
• Bruce Mitchener – Dylan lexer rewrite
• Reuben Morais – SourcePawn lexer
• Jon Morton – Rust lexer
• Paulo Moura – Logtalk lexer
• Mher Movsisyan – DTD lexer
• Dejan Muhamedagic – Crmsh lexer
• Ana Nelson – Ragel, ANTLR, R console lexers
• Kurt Neufeld – Markdown lexer
• Nam T. Nguyen – Monokai style
• Jesper Noehr – HTML formatter “anchorlinenos”
• Mike Nolta – Julia lexer
• Jonas Obrist – BBCode lexer
• Edward O’Callaghan – Cryptol lexer
• David Oliva – Rebol lexer
• Pat Pannuto – nesC lexer
• Jon Parise – Protocol buffers and Thrift lexers
• Benjamin Peterson – Test suite refactoring
• Ronny Pfannschmidt – BBCode lexer
• Dominik Picheta – Nimrod lexer
• Andrew Pinkham – RTF Formatter Refactoring
• Clément Prévost – UrbiScript lexer
• Tanner Prynn – cmdline -x option and loading lexers from files
• Oleh Prypin – Crystal lexer (based on Ruby lexer)
• Elias Rabel – Fortran fixed form lexer
• raichoo – Idris lexer
• Kashif Rasul – CUDA lexer
• Nathan Reed – HLSL lexer
• Justin Reidy – MXML lexer
• Norman Richards – JSON lexer
• Corey Richardson – Rust lexer updates
• Lubomir Rintel – GoodData MAQL and CL lexers
• Andre Roberge – Tango style
• Georg Rollinger – HSAIL lexer
• Michiel Roos – TypoScript lexer
• Konrad Rudolph – LaTeX formatter enhancements
• Mario Ruggier – Evoque lexers
• Miikka Salminen – Lovelace style, Hexdump lexer, lexer enhancements
• Stou Sandalski – NumPy, FORTRAN, tcsh and XSLT lexers
• Matteo Sasso – Common Lisp lexer
• Joe Schafer – Ada lexer
• Ken Schutte – Matlab lexers
• René Schwaiger – Rainbow Dash style
• Sebastian Schweizer – Whiley lexer
• Tassilo Schweyer – Io, MOOCode lexers
• Ted Shaw – AutoIt lexer
• Joerg Sieker – ABAP lexer
• Robert Simmons – Standard ML lexer
• Kirill Simonov – YAML lexer
• Corbin Simpson – Monte lexer
• Alexander Smishlajev – Visual FoxPro lexer
• Steve Spigarelli – XQuery lexer
• Jerome St-Louis – eC lexer
• Camil Staps – Clean and NuSMV lexers; Solarized style
• James Strachan – Kotlin lexer
• Tom Stuart – Treetop lexer
• Colin Sullivan – SuperCollider lexer
• Ben Swift – Extempore lexer
• Edoardo Tenani – Arduino lexer
• Tiberius Teng – default style overhaul
• Jeremy Thurgood – Erlang, Squid config lexers
• Brian Tiffin – OpenCOBOL lexer
• Bob Tolbert – Hy lexer
• Matthias Trute – Forth lexer
• Erick Tryzelaar – Felix lexer
• Alexander Udalov – Kotlin lexer improvements
• Thomas Van Doren – Chapel lexer
• Daniele Varrazzo – PostgreSQL lexers
• Abe Voelker – OpenEdge ABL lexer
• Pepijn de Vos – HTML formatter CTags support
• Matthias Vallentin – Bro lexer
• Benoît Vinot – AMPL lexer
• Linh Vu Hong – RSL lexer
• Nathan Weizenbaum – Haml and Sass lexers
• Nathan Whetsell – Csound lexers
• Dietmar Winkler – Modelica lexer
• Nils Winter – Smalltalk lexer
• Davy Wybiral – Clojure lexer
• Whitney Young – ObjectiveC lexer
• Diego Zamboni – CFengine3 lexer
• Enrique Zamudio – Ceylon lexer
• Alex Zimin – Nemerle lexer
• Rob Zimmerman – Kal lexer
• Vincent Zurczak – Roboconf lexer

Many thanks for all contributions!


Pygments changelog

Since 2.5.0, issue numbers refer to the tracker at
<https://github.com/pygments/pygments/issues>, pull request numbers to
the requests at <https://github.com/pygments/pygments/pulls>.

Version 2.5.0

-
    -
  • Added lexers:

    -
      -
    • Email (PR#1246)

    • -
    • Erlang, Elixir shells (PR#823, #1521)

    • -
    • Notmuch (PR#1264)

    • -
    • Scdoc (PR#1268)

    • -
    • Solidity (#1214)

    • -
    • Zeek (new name for Bro) (PR#1269)

    • -
    • Zig (PR#820)

    • -
    -
  • -
  • Updated lexers:

    -
      -
    • Apache2 Configuration (PR#1251)

    • -
    • Bash sessions (#1253)

    • -
    • CSound (PR#1250)

    • -
    • Dart

    • -
    • Dockerfile

    • -
    • Emacs Lisp

    • -
    • Handlebars (PR#773)

    • -
    • Java (#1101, #987)

    • -
    • Logtalk (PR#1261)

    • -
    • Matlab (PR#1271)

    • -
    • Praat (PR#1277)

    • -
    • Python3 (PR#1255)

    • -
    • Ruby

    • -
    • YAML (#1528)

    • -
    • Velocity

    • -
    -
  • -
  • Added styles:

    -
      -
    • Inkpot (PR#1276)

    • -
    -
  • -
  • The PythonLexer class is now an alias for the former Python3Lexer. -The old PythonLexer is available as Python2Lexer. Same change has -been done for the PythonTracebackLexer. The python3 option for -the PythonConsoleLexer is now true by default.

  • -
  • Bump NasmLexer priority over TasmLexer for .asm files -(fixes #1326)

  • -
  • Default font in the ImageFormatter has been updated (#928, PR#1245)

  • -
  • Test suite switched to py.test, removed nose dependency (#1490)

  • -
  • Reduce TeraTerm lexer score – it used to match nearly all languages -(#1256)

  • -
  • Treat Skylark/Starlark files as Python files (PR#1259)

  • -
  • Image formatter: actually respect line_number_separator option

  • -
  • Add LICENSE file to wheel builds

  • -
  • Agda: fix lambda highlighting

  • -
  • Dart: support @ annotations

  • -
  • Dockerfile: accept FROM ... AS syntax

  • -
  • Emacs Lisp: add more string functions

  • -
  • GAS: accept registers in directive arguments

  • -
  • Java: make structural punctuation (braces, parens, colon, comma) Punctuation, not Operator (#987)

  • -
  • Java: support var contextual keyword (#1101)

  • -
  • Matlab: Fix recognition of function keyword (PR#1271)

  • -
  • Python: recognize .jy filenames (#976)

  • -
  • Python: recognize f string prefix (#1156)

  • -
  • Ruby: support squiggly heredocs

  • -
  • Shell sessions: recognize Virtualenv prompt (PR#1266)

  • -
  • Velocity: support silent reference syntax

  • -
-
-
-

Version 2.4.2

-

(released May 28, 2019)

-
    -
  • Fix encoding error when guessing lexer with given encoding option -(#1438)

  • -
-
-
-

Version 2.4.1

-

(released May 24, 2019)

-
    -
  • Updated lexers:

    -
      -
    • Coq (#1430)

    • -
    • MSDOS Session (PR#734)

    • -
    • NASM (#1517)

    • -
    • Objective-C (PR#813, #1508)

    • -
    • Prolog (#1511)

    • -
    • TypeScript (#1515)

    • -
    -
  • -
  • Support CSS variables in stylesheets (PR#814, #1356)

  • -
  • Fix F# lexer name (PR#709)

  • -
  • Fix TerminalFormatter using bold for bright text (#1480)

  • -
-
-
-

Version 2.4.0

-

(released May 8, 2019)

-
    -
  • Added lexers:

    -
      -
    • Augeas (PR#807)

    • -
    • BBC Basic (PR#806)

    • -
    • Boa (PR#756)

    • -
    • Charm++ CI (PR#788)

    • -
    • DASM16 (PR#807)

    • -
    • FloScript (PR#750)

    • -
    • FreeFem++ (PR#785)

    • -
    • Hspec (PR#790)

    • -
    • Pony (PR#627)

    • -
    • SGF (PR#780)

    • -
    • Slash (PR#807)

    • -
    • Slurm (PR#760)

    • -
    • Tera Term Language (PR#749)

    • -
    • TOML (PR#807)

    • -
    • Unicon (PR#731)

    • -
    • VBScript (PR#673)

    • -
    -
  • -
  • Updated lexers:

    -
      -
    • Apache2 (PR#766)

    • -
    • Cypher (PR#746)

    • -
    • LLVM (PR#792)

    • -
    • Makefiles (PR#766)

    • -
    • PHP (#1482)

    • -
    • Rust

    • -
    • SQL (PR#672)

    • -
    • Stan (PR#774)

    • -
    • Stata (PR#800)

    • -
    • Terraform (PR#787)

    • -
    • YAML

    • -
    -
  • -
  • Add solarized style (PR#708)

  • -
  • Add support for Markdown reference-style links (PR#753)

  • -
  • Add license information to generated HTML/CSS files (#1496)

  • -
  • Change ANSI color names (PR#777)

  • -
  • Fix catastrophic backtracking in the bash lexer (#1494)

  • -
  • Fix documentation failing to build using Sphinx 2.0 (#1501)

  • -
  • Fix incorrect links in the Lisp and R lexer documentation (PR#775)

  • -
  • Fix rare unicode errors on Python 2.7 (PR#798, #1492)

  • -
  • Fix lexers popping from an empty stack (#1506)

  • -
  • TypoScript uses .typoscript now (#1498)

  • -
  • Updated Trove classifiers and pip requirements (PR#799)

  • -
-
-
-

Version 2.3.1

-

(released Dec 16, 2018)

-
    -
  • Updated lexers:

    -
      -
    • ASM (PR#784)

    • -
    • Chapel (PR#735)

    • -
    • Clean (PR#621)

    • -
    • CSound (PR#684)

    • -
    • Elm (PR#744)

    • -
    • Fortran (PR#747)

    • -
    • GLSL (PR#740)

    • -
    • Haskell (PR#745)

    • -
    • Hy (PR#754)

    • -
    • Igor Pro (PR#764)

    • -
    • PowerShell (PR#705)

    • -
    • Python (PR#720, #1299, PR#715)

    • -
    • SLexer (PR#680)

    • -
    • YAML (PR#762, PR#724)

    • -
    -
  • -
  • Fix invalid string escape sequences

  • -
  • Fix FutureWarning introduced by regex changes in Python 3.7

  • -
-
-
-

Version 2.3.0

-

(released Nov 25, 2018)

-
    -
  • Added lexers:

    -
      -
    • Fennel (PR#783)

    • -
    • HLSL (PR#675)

    • -
    -
  • -
  • Updated lexers:

    -
      -
    • Dockerfile (PR#714)

    • -
    -
  • -
  • Minimum Python versions changed to 2.7 and 3.5

  • -
  • Added support for Python 3.7 generator changes (PR#772)

  • -
  • Fix incorrect token type in SCSS for single-quote strings (#1322)

  • -
  • Use terminal256 formatter if TERM contains 256 (PR#666)

  • -
  • Fix incorrect handling of GitHub style fences in Markdown (PR#741, #1389)

  • -
  • Fix %a not being highlighted in Python3 strings (PR#727)

  • -
-
-
-

Version 2.2.0

-

(released Jan 22, 2017)

-
    -
  • Added lexers:

    -
      -
    • AMPL

    • -
    • TypoScript (#1173)

    • -
    • Varnish config (PR#554)

    • -
    • Clean (PR#503)

    • -
    • WDiff (PR#513)

    • -
    • Flatline (PR#551)

    • -
    • Silver (PR#537)

    • -
    • HSAIL (PR#518)

    • -
    • JSGF (PR#546)

    • -
    • NCAR command language (PR#536)

    • -
    • Extempore (PR#530)

    • -
    • Cap’n Proto (PR#595)

    • -
    • Whiley (PR#573)

    • -
    • Monte (PR#592)

    • -
    • Crystal (PR#576)

    • -
    • Snowball (PR#589)

    • -
    • CapDL (PR#579)

    • -
    • NuSMV (PR#564)

    • -
    • SAS, Stata (PR#593)

    • -
    -
  • -
  • Added the ability to load lexer and formatter classes directly from files -with the -x command line option and the lexers.load_lexer_from_file() -and formatters.load_formatter_from_file() functions. (PR#559)

  • -
  • Added lexers.find_lexer_class_by_name(). (#1203)

  • -
  • Added new token types and lexing for magic methods and variables in Python -and PHP.

  • -
  • Added a new token type for string affixes and lexing for them in Python, C++ -and Postgresql lexers.

  • -
  • Added a new token type for heredoc (and similar) string delimiters and -lexing for them in C++, Perl, PHP, Postgresql and Ruby lexers.

  • -
  • Styles can now define colors with ANSI colors for use in the 256-color -terminal formatter. (PR#531)

  • -
  • Improved the CSS lexer. (#1083, #1130)

  • -
  • Added “Rainbow Dash” style. (PR#623)

  • -
  • Delay loading pkg_resources, which takes a long while to import. (PR#690)

  • -
-
-
-

Version 2.1.3

-

(released Mar 2, 2016)

-
    -
  • Fixed regression in Bash lexer (PR#563)

  • -
-
-
-

Version 2.1.2

-

(released Feb 29, 2016)

-
    -
  • Fixed Python 3 regression in image formatter (#1215)

  • -
  • Fixed regression in Bash lexer (PR#562)

  • -
-
-
-

Version 2.1.1

-

(released Feb 14, 2016)

-
    -
  • Fixed Jython compatibility (#1205)

  • -
  • Fixed HTML formatter output with leading empty lines (#1111)

  • -
  • Added a mapping table for LaTeX encodings and added utf8 (#1152)

  • -
  • Fixed image formatter font searching on Macs (#1188)

  • -
  • Fixed deepcopy-ing of Token instances (#1168)

  • -
  • Fixed Julia string interpolation (#1170)

  • -
  • Fixed statefulness of HttpLexer between get_tokens calls

  • -
  • Many smaller fixes to various lexers

  • -
-
-
-

Version 2.1

-

(released Jan 17, 2016)

-
    -
  • Added lexers:

    -
      -
    • Emacs Lisp (PR#431)

    • -
    • Arduino (PR#442)

    • -
    • Modula-2 with multi-dialect support (#1090)

    • -
    • Fortran fixed format (PR#213)

    • -
    • Archetype Definition language (PR#483)

    • -
    • Terraform (PR#432)

    • -
    • Jcl, Easytrieve (PR#208)

    • -
    • ParaSail (PR#381)

    • -
    • Boogie (PR#420)

    • -
    • Turtle (PR#425)

    • -
    • Fish Shell (PR#422)

    • -
    • Roboconf (PR#449)

    • -
    • Test Anything Protocol (PR#428)

    • -
    • Shen (PR#385)

    • -
    • Component Pascal (PR#437)

    • -
    • SuperCollider (PR#472)

    • -
    • Shell consoles (Tcsh, PowerShell, MSDOS) (PR#479)

    • -
    • Elm and J (PR#452)

    • -
    • Crmsh (PR#440)

    • -
    • Praat (PR#492)

    • -
    • CSound (PR#494)

    • -
    • Ezhil (PR#443)

    • -
    • Thrift (PR#469)

    • -
    • QVT Operational (PR#204)

    • -
    • Hexdump (PR#508)

    • -
    • CAmkES Configuration (PR#462)

    • -
    -
  • -
  • Added styles:

    -
      -
    • Lovelace (PR#456)

    • -
    • Algol and Algol-nu (#1090)

    • -
    -
  • -
  • Added formatters:

    -
      -
    • IRC (PR#458)

    • -
    • True color (24-bit) terminal ANSI sequences (#1142) -(formatter alias: “16m”)

    • -
    -
  • -
  • New “filename” option for HTML formatter (PR#527).

  • -
  • Improved performance of the HTML formatter for long lines (PR#504).

  • -
  • Updated autopygmentize script (PR#445).

  • -
  • Fixed style inheritance for non-standard token types in HTML output.

  • -
  • Added support for async/await to Python 3 lexer.

  • -
  • Rewrote linenos option for TerminalFormatter (it’s better, but slightly -different output than before) (#1147).

  • -
  • Javascript lexer now supports most of ES6 (#1100).

  • -
  • Cocoa builtins updated for iOS 8.1 (PR#433).

  • -
  • Combined BashSessionLexer and ShellSessionLexer, new version should support -the prompt styles of either.

  • -
  • Added option to pygmentize to show a full traceback on exceptions.

  • -
  • Fixed incomplete output on Windows and Python 3 (e.g. when using iPython -Notebook) (#1153).

  • -
  • Allowed more traceback styles in Python console lexer (PR#253).

  • -
  • Added decorators to TypeScript (PR#509).

  • -
  • Fix highlighting of certain IRC logs formats (#1076).

  • -
-
-
-

Version 2.0.2

-

(released Jan 20, 2015)

-
    -
  • Fix Python tracebacks getting duplicated in the console lexer (#1068).

  • -
  • Backquote-delimited identifiers are now recognized in F# (#1062).

  • -
-
-
-

Version 2.0.1

-

(released Nov 10, 2014)

-
    -
  • Fix an encoding issue when using pygmentize with the -o option.

  • -
-
-
-

Version 2.0

-

(released Nov 9, 2014)

-
    -
  • Default lexer encoding is now “guess”, i.e. UTF-8 / Locale / Latin1 is -tried in that order.

  • -
  • Major update to Swift lexer (PR#410).

  • -
  • Multiple fixes to lexer guessing in conflicting cases:

    -
      -
    • recognize HTML5 by doctype

    • -
    • recognize XML by XML declaration

    • -
    • don’t recognize C/C++ as SystemVerilog

    • -
    -
  • -
  • Simplified regexes and builtin lists.

  • -
-
-
-

Version 2.0rc1

-

(released Oct 16, 2014)

-
    -
  • Dropped Python 2.4 and 2.5 compatibility. This is in favor of single-source -compatibility between Python 2.6, 2.7 and 3.3+.

  • -
  • New website and documentation based on Sphinx (finally!)

  • -
  • Lexers added:

    -
      -
    • APL (#969)

    • -
    • Agda and Literate Agda (PR#203)

    • -
    • Alloy (PR#355)

    • -
    • AmbientTalk

    • -
    • BlitzBasic (PR#197)

    • -
    • ChaiScript (PR#24)

    • -
    • Chapel (PR#256)

    • -
    • Cirru (PR#275)

    • -
    • Clay (PR#184)

    • -
    • ColdFusion CFC (PR#283)

    • -
    • Cryptol and Literate Cryptol (PR#344)

    • -
    • Cypher (PR#257)

    • -
    • Docker config files

    • -
    • EBNF (PR#193)

    • -
    • Eiffel (PR#273)

    • -
    • GAP (PR#311)

    • -
    • Golo (PR#309)

    • -
    • Handlebars (PR#186)

    • -
    • Hy (PR#238)

    • -
    • Idris and Literate Idris (PR#210)

    • -
    • Igor Pro (PR#172)

    • -
    • Inform 6/7 (PR#281)

    • -
    • Intel objdump (PR#279)

    • -
    • Isabelle (PR#386)

    • -
    • Jasmin (PR#349)

    • -
    • JSON-LD (PR#289)

    • -
    • Kal (PR#233)

    • -
    • Lean (PR#399)

    • -
    • LSL (PR#296)

    • -
    • Limbo (PR#291)

    • -
    • Liquid (#977)

    • -
    • MQL (PR#285)

    • -
    • MaskJS (PR#280)

    • -
    • Mozilla preprocessors

    • -
    • Mathematica (PR#245)

    • -
    • NesC (PR#166)

    • -
    • Nit (PR#375)

    • -
    • Nix (PR#267)

    • -
    • Pan

    • -
    • Pawn (PR#211)

    • -
    • Perl 6 (PR#181)

    • -
    • Pig (PR#304)

    • -
    • Pike (PR#237)

    • -
    • QBasic (PR#182)

    • -
    • Red (PR#341)

    • -
    • ResourceBundle (#1038)

    • -
    • Rexx (PR#199)

    • -
    • Rql (PR#251)

    • -
    • Rsl

    • -
    • SPARQL (PR#78)

    • -
    • Slim (PR#366)

    • -
    • Swift (PR#371)

    • -
    • Swig (PR#168)

    • -
    • TADS 3 (PR#407)

    • -
    • Todo.txt todo lists

    • -
    • Twig (PR#404)

    • -
    -
  • -
  • Added a helper to “optimize” regular expressions that match one of many -literal words; this can save 20% and more lexing time with lexers that -highlight many keywords or builtins.

  • -
  • New styles: “xcode” and “igor”, similar to the default highlighting of -the respective IDEs.

  • -
  • The command-line “pygmentize” tool now tries a little harder to find the -correct encoding for files and the terminal (#979).

  • -
  • Added “inencoding” option for lexers to override “encoding” analogous -to “outencoding” (#800).

  • -
  • Added line-by-line “streaming” mode for pygmentize with the “-s” option. -(PR#165) Only fully works for lexers that have no constructs spanning -lines!

  • -
  • Added an “envname” option to the LaTeX formatter to select a replacement -verbatim environment (PR#235).

  • -
  • Updated the Makefile lexer to yield a little more useful highlighting.

  • -
  • Lexer aliases passed to get_lexer_by_name() are now case-insensitive.

  • -
  • File name matching in lexers and formatters will now use a regex cache -for speed (PR#205).

  • -
  • Pygments will now recognize “vim” modelines when guessing the lexer for -a file based on content (PR#118).

  • -
  • Major restructure of the pygments.lexers module namespace. There are now -many more modules with less lexers per module. Old modules are still around -and re-export the lexers they previously contained.

  • -
  • The NameHighlightFilter now works with any Name.* token type (#790).

  • -
  • Python 3 lexer: add new exceptions from PEP 3151.

  • -
  • Opa lexer: add new keywords (PR#170).

  • -
  • Julia lexer: add keywords and underscore-separated number -literals (PR#176).

  • -
  • Lasso lexer: fix method highlighting, update builtins. Fix -guessing so that plain XML isn’t always taken as Lasso (PR#163).

  • -
  • Objective C/C++ lexers: allow “@” prefixing any expression (#871).

  • -
  • Ruby lexer: fix lexing of Name::Space tokens (#860) and of symbols -in hashes (#873).

  • -
  • Stan lexer: update for version 2.4.0 of the language (PR#162, PR#255, PR#377).

  • -
  • JavaScript lexer: add the “yield” keyword (PR#196).

  • -
  • HTTP lexer: support for PATCH method (PR#190).

  • -
  • Koka lexer: update to newest language spec (PR#201).

  • -
  • Haxe lexer: rewrite and support for Haxe 3 (PR#174).

  • -
  • Prolog lexer: add different kinds of numeric literals (#864).

  • -
  • F# lexer: rewrite with newest spec for F# 3.0 (#842), fix a bug with -dotted chains (#948).

  • -
  • Kotlin lexer: general update (PR#271).

  • -
  • Rebol lexer: fix comment detection and analyse_text (PR#261).

  • -
  • LLVM lexer: update keywords to v3.4 (PR#258).

  • -
  • PHP lexer: add new keywords and binary literals (PR#222).

  • -
  • external/markdown-processor.py updated to newest python-markdown (PR#221).

  • -
  • CSS lexer: some highlighting order fixes (PR#231).

  • -
  • Ceylon lexer: fix parsing of nested multiline comments (#915).

  • -
  • C family lexers: fix parsing of indented preprocessor directives (#944).

  • -
  • Rust lexer: update to 0.9 language version (PR#270, PR#388).

  • -
  • Elixir lexer: update to 0.15 language version (PR#392).

  • -
  • Fix swallowing incomplete tracebacks in Python console lexer (#874).

  • -
-
-
-

Version 1.6

-

(released Feb 3, 2013)

-
    -
  • Lexers added:

    -
      -
    • Dylan console (PR#149)

    • -
    • Logos (PR#150)

    • -
    • Shell sessions (PR#158)

    • -
    -
  • -
  • Fix guessed lexers not receiving lexer options (#838).

  • -
  • Fix unquoted HTML attribute lexing in Opa (#841).

  • -
  • Fixes to the Dart lexer (PR#160).

  • -
-
-
-

Version 1.6rc1

-

(released Jan 9, 2013)

-
    -
  • Lexers added:

    -
      -
    • AspectJ (PR#90)

    • -
    • AutoIt (PR#122)

    • -
    • BUGS-like languages (PR#89)

    • -
    • Ceylon (PR#86)

    • -
    • Croc (new name for MiniD)

    • -
    • CUDA (PR#75)

    • -
    • Dg (PR#116)

    • -
    • IDL (PR#115)

    • -
    • Jags (PR#89)

    • -
    • Julia (PR#61)

    • -
    • Kconfig (#711)

    • -
    • Lasso (PR#95, PR#113)

    • -
    • LiveScript (PR#84)

    • -
    • Monkey (PR#117)

    • -
    • Mscgen (PR#80)

    • -
    • NSIS scripts (PR#136)

    • -
    • OpenCOBOL (PR#72)

    • -
    • QML (PR#123)

    • -
    • Puppet (PR#133)

    • -
    • Racket (PR#94)

    • -
    • Rdoc (PR#99)

    • -
    • Robot Framework (PR#137)

    • -
    • RPM spec files (PR#124)

    • -
    • Rust (PR#67)

    • -
    • Smali (Dalvik assembly)

    • -
    • SourcePawn (PR#39)

    • -
    • Stan (PR#89)

    • -
    • Treetop (PR#125)

    • -
    • TypeScript (PR#114)

    • -
    • VGL (PR#12)

    • -
    • Visual FoxPro (#762)

    • -
    • Windows Registry (#819)

    • -
    • Xtend (PR#68)

    • -
    -
  • -
  • The HTML formatter now supports linking to tags using CTags files, when the -python-ctags package is installed (PR#87).

  • -
  • The HTML formatter now has a “linespans” option that wraps every line in a -<span> tag with a specific id (PR#82).

  • -
  • When deriving a lexer from another lexer with token definitions, definitions -for states not in the child lexer are now inherited. If you override a state -in the child lexer, an “inherit” keyword has been added to insert the base -state at that position (PR#141).

  • -
  • The C family lexers now inherit token definitions from a common base class, -removing code duplication (PR#141).

  • -
  • Use “colorama” on Windows for console color output (PR#142).

  • -
  • Fix Template Haskell highlighting (PR#63).

  • -
  • Fix some S/R lexer errors (PR#91).

  • -
  • Fix a bug in the Prolog lexer with names that start with ‘is’ (#810).

  • -
  • Rewrite Dylan lexer, add Dylan LID lexer (PR#147).

  • -
  • Add a Java quickstart document (PR#146).

  • -
  • Add a “external/autopygmentize” file that can be used as .lessfilter (#802).

  • -
-
-
-

Version 1.5

-

(codename Zeitdilatation, released Mar 10, 2012)

-
    -
  • Lexers added:

    -
      -
    • Awk (#630)

    • -
    • Fancy (#633)

    • -
    • PyPy Log

    • -
    • eC

    • -
    • Nimrod

    • -
    • Nemerle (#667)

    • -
    • F# (#353)

    • -
    • Groovy (#501)

    • -
    • PostgreSQL (#660)

    • -
    • DTD

    • -
    • Gosu (#634)

    • -
    • Octave (PR#22)

    • -
    • Standard ML (PR#14)

    • -
    • CFengine3 (#601)

    • -
    • Opa (PR#37)

    • -
    • HTTP sessions (PR#42)

    • -
    • JSON (PR#31)

    • -
    • SNOBOL (PR#30)

    • -
    • MoonScript (PR#43)

    • -
    • ECL (PR#29)

    • -
    • Urbiscript (PR#17)

    • -
    • OpenEdge ABL (PR#27)

    • -
    • SystemVerilog (PR#35)

    • -
    • Coq (#734)

    • -
    • PowerShell (#654)

    • -
    • Dart (#715)

    • -
    • Fantom (PR#36)

    • -
    • Bro (PR#5)

    • -
    • NewLISP (PR#26)

    • -
    • VHDL (PR#45)

    • -
    • Scilab (#740)

    • -
    • Elixir (PR#57)

    • -
    • Tea (PR#56)

    • -
    • Kotlin (PR#58)

    • -
    -
  • -
  • Fix Python 3 terminal highlighting with pygmentize (#691).

  • -
  • In the LaTeX formatter, escape special &, < and > chars (#648).

  • -
  • In the LaTeX formatter, fix display problems for styles with token -background colors (#670).

  • -
  • Enhancements to the Squid conf lexer (#664).

  • -
  • Several fixes to the reStructuredText lexer (#636).

  • -
  • Recognize methods in the ObjC lexer (#638).

  • -
  • Fix Lua “class” highlighting: it does not have classes (#665).

  • -
  • Fix degenerate regex in Scala lexer (#671) and highlighting bugs (#713, #708).

  • -
  • Fix number pattern order in Ocaml lexer (#647).

  • -
  • Fix generic type highlighting in ActionScript 3 (#666).

  • -
  • Fixes to the Clojure lexer (PR#9).

  • -
  • Fix degenerate regex in Nemerle lexer (#706).

  • -
  • Fix infinite looping in CoffeeScript lexer (#729).

  • -
  • Fix crashes and analysis with ObjectiveC lexer (#693, #696).

  • -
  • Add some Fortran 2003 keywords.

  • -
  • Fix Boo string regexes (#679).

  • -
  • Add “rrt” style (#727).

  • -
  • Fix infinite looping in Darcs Patch lexer.

  • -
  • Lots of misc fixes to character-eating bugs and ordering problems in many -different lexers.

  • -
-
-
-

Version 1.4

-

(codename Unschärfe, released Jan 03, 2011)

-
    -
  • Lexers added:

    -
      -
    • Factor (#520)

    • -
    • PostScript (#486)

    • -
    • Verilog (#491)

    • -
    • BlitzMax Basic (#478)

    • -
    • Ioke (#465)

    • -
    • Java properties, split out of the INI lexer (#445)

    • -
    • Scss (#509)

    • -
    • Duel/JBST

    • -
    • XQuery (#617)

    • -
    • Mason (#615)

    • -
    • GoodData (#609)

    • -
    • SSP (#473)

    • -
    • Autohotkey (#417)

    • -
    • Google Protocol Buffers

    • -
    • Hybris (#506)

    • -
    -
  • -
  • Do not fail in analyse_text methods (#618).

  • -
  • Performance improvements in the HTML formatter (#523).

  • -
  • With the noclasses option in the HTML formatter, some styles -present in the stylesheet were not added as inline styles.

  • -
  • Four fixes to the Lua lexer (#480, #481, #482, #497).

  • -
  • More context-sensitive Gherkin lexer with support for more i18n translations.

  • -
  • Support new OO keywords in Matlab lexer (#521).

  • -
  • Small fix in the CoffeeScript lexer (#519).

  • -
  • A bugfix for backslashes in ocaml strings (#499).

  • -
  • Fix unicode/raw docstrings in the Python lexer (#489).

  • -
  • Allow PIL to work without PIL.pth (#502).

  • -
  • Allow seconds as a unit in CSS (#496).

  • -
  • Support application/javascript as a JavaScript mime type (#504).

  • -
  • Support Offload C++ Extensions as -keywords in the C++ lexer (#484).

  • -
  • Escape more characters in LaTeX output (#505).

  • -
  • Update Haml/Sass lexers to version 3 (#509).

  • -
  • Small PHP lexer string escaping fix (#515).

  • -
  • Support comments before preprocessor directives, and unsigned/ -long long literals in C/C++ (#613, #616).

  • -
  • Support line continuations in the INI lexer (#494).

  • -
  • Fix lexing of Dylan string and char literals (#628).

  • -
  • Fix class/procedure name highlighting in VB.NET lexer (#624).

  • -
-
-
-

Version 1.3.1

-

(bugfix release, released Mar 05, 2010)

-
    -
  • The pygmentize script was missing from the distribution.

  • -
-
-
-

Version 1.3

-

(codename Schneeglöckchen, released Mar 01, 2010)

-
    -
  • Added the ensurenl lexer option, which can be used to suppress the -automatic addition of a newline to the lexer input.

  • -
  • Lexers added:

    -
      -
    • Ada

    • -
    • Coldfusion

    • -
    • Modula-2

    • -
    • Haxe

    • -
    • R console

    • -
    • Objective-J

    • -
    • Haml and Sass

    • -
    • CoffeeScript

    • -
    -
  • -
  • Enhanced reStructuredText highlighting.

  • -
  • Added support for PHP 5.3 namespaces in the PHP lexer.

  • -
  • Added a bash completion script for pygmentize, to the external/ -directory (#466).

  • -
  • Fixed a bug in do_insertions() used for multi-lexer languages.

  • -
  • Fixed a Ruby regex highlighting bug (#476).

  • -
  • Fixed regex highlighting bugs in Perl lexer (#258).

  • -
  • Add small enhancements to the C lexer (#467) and Bash lexer (#469).

  • -
  • Small fixes for the Tcl, Debian control file, Nginx config, -Smalltalk, Objective-C, Clojure, Lua lexers.

  • -
  • Gherkin lexer: Fixed single apostrophe bug and added new i18n keywords.

  • -
-
-
-

Version 1.2.2

-

(bugfix release, released Jan 02, 2010)

-
    -
  • Removed a backwards incompatibility in the LaTeX formatter that caused -Sphinx to produce invalid commands when writing LaTeX output (#463).

  • -
  • Fixed a forever-backtracking regex in the BashLexer (#462).

  • -
-
-
-

Version 1.2.1

-

(bugfix release, released Jan 02, 2010)

-
    -
  • Fixed mishandling of an ellipsis in place of the frames in a Python -console traceback, resulting in clobbered output.

  • -
-
-
-

Version 1.2

-

(codename Neujahr, released Jan 01, 2010)

-
    -
  • Dropped Python 2.3 compatibility.

  • -
  • Lexers added:

    -
      -
    • Asymptote

    • -
    • Go

    • -
    • Gherkin (Cucumber)

    • -
    • CMake

    • -
    • Ooc

    • -
    • Coldfusion

    • -
    • Haxe

    • -
    • R console

    • -
    -
  • -
  • Added options for rendering LaTeX in source code comments in the -LaTeX formatter (#461).

  • -
  • Updated the Logtalk lexer.

  • -
  • Added line_number_start option to image formatter (#456).

  • -
  • Added hl_lines and hl_color options to image formatter (#457).

  • -
  • Fixed the HtmlFormatter’s handling of noclasses=True to not output any -classes (#427).

  • -
  • Added the Monokai style (#453).

  • -
  • Fixed LLVM lexer identifier syntax and added new keywords (#442).

  • -
  • Fixed the PythonTracebackLexer to handle non-traceback data in header or -trailer, and support more partial tracebacks that start on line 2 (#437).

  • -
  • Fixed the CLexer to not highlight ternary statements as labels.

  • -
  • Fixed lexing of some Ruby quoting peculiarities (#460).

  • -
  • A few ASM lexer fixes (#450).

  • -
-
-
-

Version 1.1.1

-

(bugfix release, released Sep 15, 2009)

-
    -
  • Fixed the BBCode lexer (#435).

  • -
  • Added support for new Jinja2 keywords.

  • -
  • Fixed test suite failures.

  • -
  • Added Gentoo-specific suffixes to Bash lexer.

  • -
-
-
-

Version 1.1

-

(codename Brillouin, released Sep 11, 2009)

-
    -
  • Ported Pygments to Python 3. This needed a few changes in the way -encodings are handled; they may affect corner cases when used with -Python 2 as well.

  • -
  • Lexers added:

    -
      -
    • Antlr/Ragel, thanks to Ana Nelson

    • -
    • (Ba)sh shell

    • -
    • Erlang shell

    • -
    • GLSL

    • -
    • Prolog

    • -
    • Evoque

    • -
    • Modelica

    • -
    • Rebol

    • -
    • MXML

    • -
    • Cython

    • -
    • ABAP

    • -
    • ASP.net (VB/C#)

    • -
    • Vala

    • -
    • Newspeak

    • -
    -
  • -
  • Fixed the LaTeX formatter’s output so that output generated for one style -can be used with the style definitions of another (#384).

  • -
  • Added “anchorlinenos” and “noclobber_cssfile” (#396) options to HTML -formatter.

  • -
  • Support multiline strings in Lua lexer.

  • -
  • Rewrite of the JavaScript lexer by Pumbaa80 to better support regular -expression literals (#403).

  • -
  • When pygmentize is asked to highlight a file for which multiple lexers -match the filename, use the analyse_text guessing engine to determine the -winner (#355).

  • -
  • Fixed minor bugs in the JavaScript lexer (#383), the Matlab lexer (#378), -the Scala lexer (#392), the INI lexer (#391), the Clojure lexer (#387) -and the AS3 lexer (#389).

  • -
  • Fixed three Perl heredoc lexing bugs (#379, #400, #422).

  • -
  • Fixed a bug in the image formatter which misdetected lines (#380).

  • -
  • Fixed bugs lexing extended Ruby strings and regexes.

  • -
  • Fixed a bug when lexing git diffs.

  • -
  • Fixed a bug lexing the empty commit in the PHP lexer (#405).

  • -
  • Fixed a bug causing Python numbers to be mishighlighted as floats (#397).

  • -
  • Fixed a bug when backslashes are used in odd locations in Python (#395).

  • -
  • Fixed various bugs in Matlab and S-Plus lexers, thanks to Winston Chang (#410, -#411, #413, #414) and fmarc (#419).

  • -
  • Fixed a bug in Haskell single-line comment detection (#426).

  • -
  • Added new-style reStructuredText directive for docutils 0.5+ (#428).

  • -
-
-
-

Version 1.0

(codename Dreiundzwanzig, released Nov 23, 2008)

  • Don’t use join(splitlines()) when converting newlines to \n,
    because that doesn’t keep all newlines at the end when the
    stripnl lexer option is False.

  • Added -N option to command-line interface to get a lexer name
    for a given filename.

  • Added Tango style, written by Andre Roberge for the Crunchy project.

  • Added Python3TracebackLexer and python3 option to
    PythonConsoleLexer.

  • Fixed a few bugs in the Haskell lexer.

  • Fixed PythonTracebackLexer to be able to recognize SyntaxError and
    KeyboardInterrupt (#360).

  • Provide one formatter class per image format, so that surprises like

        pygmentize -f gif -o foo.gif foo.py

    creating a PNG file are avoided.

  • Actually use the font_size option of the image formatter.

  • Fixed the numpy lexer so that it doesn’t listen for *.py any longer.

  • Fixed the HTML formatter so that text options can be Unicode
    strings (#371).

  • The Unified Diff lexer supports the “udiff” alias now.

  • Fixed a few issues in the Scala lexer (#367).

  • RubyConsoleLexer now supports simple prompt mode (#363).

  • JavascriptLexer is smarter about what constitutes a regex (#356).

  • Added AppleScript lexer, thanks to Andreas Amann (#330).

  • Made the codetags more strict about matching words (#368).

  • NginxConfLexer is a little more accurate on mimetypes and
    variables (#370).

Version 0.11.1

(released Aug 24, 2008)

  • Fixed a Jython compatibility issue in pygments.unistring (#358).

Version 0.11

(codename Straußenei, released Aug 23, 2008)

Many thanks go to Tim Hatch for writing or integrating most of the bug
fixes and new features.

  • Lexers added:

      • Nasm-style assembly language, thanks to delroth
      • YAML, thanks to Kirill Simonov
      • ActionScript 3, thanks to Pierre Bourdon
      • Cheetah/Spitfire templates, thanks to Matt Good
      • Lighttpd config files
      • Nginx config files
      • Gnuplot plotting scripts
      • Clojure
      • POV-Ray scene files
      • Sqlite3 interactive console sessions
      • Scala source files, thanks to Krzysiek Goj

  • Lexers improved:

      • C lexer highlights standard library functions now and supports C99
        types.
      • Bash lexer now correctly highlights heredocs without preceding
        whitespace.
      • Vim lexer now highlights hex colors properly and knows a couple
        more keywords.
      • Irc logs lexer now handles xchat’s default time format (#340) and
        correctly highlights lines ending in >.
      • Support more delimiters for perl regular expressions (#258).
      • ObjectiveC lexer now supports 2.0 features.

  • Added “Visual Studio” style.

  • Updated markdown processor to Markdown 1.7.

  • Support roman/sans/mono style defs and use them in the LaTeX
    formatter.

  • The RawTokenFormatter is no longer registered to *.raw and it’s
    documented that tokenization with this lexer may raise exceptions.

  • New option hl_lines to HTML formatter, to highlight certain lines.

  • New option prestyles to HTML formatter.

  • New option -g to pygmentize, to allow lexer guessing based on
    file text (can be slowish, so file extensions are still checked
    first).

  • guess_lexer() now makes its decision much faster due to a cache
    of whether data is xml-like (a check which is used in several
    versions of analyse_text()). Several lexers also have more
    accurate analyse_text() now.

Version 0.10

(codename Malzeug, released May 06, 2008)

  • Lexers added:

      • Io
      • Smalltalk
      • Darcs patches
      • Tcl
      • Matlab
      • Matlab sessions
      • FORTRAN
      • XSLT
      • tcsh
      • NumPy
      • Python 3
      • S, S-plus, R statistics languages
      • Logtalk

  • In the LatexFormatter, the commandprefix option is now by default
    ‘PY’ instead of ‘C’, since the latter resulted in several collisions
    with other packages. Also, the special meaning of the arg
    argument to get_style_defs() was removed.

  • Added ImageFormatter, to format code as PNG, JPG, GIF or BMP.
    (Needs the Python Imaging Library.)

  • Support doc comments in the PHP lexer.

  • Handle format specifications in the Perl lexer.

  • Fix comment handling in the Batch lexer.

  • Add more file name extensions for the C++, INI and XML lexers.

  • Fixes in the IRC and MuPad lexers.

  • Fix function and interface name highlighting in the Java lexer.

  • Fix at-rule handling in the CSS lexer.

  • Handle KeyboardInterrupts gracefully in pygmentize.

  • Added BlackWhiteStyle.

  • Bash lexer now correctly highlights math, does not require
    whitespace after semicolons, and correctly highlights boolean
    operators.

  • Makefile lexer is now capable of handling BSD and GNU make syntax.

Version 0.9

(codename Herbstzeitlose, released Oct 14, 2007)

  • Lexers added:

      • Erlang
      • ActionScript
      • Literate Haskell
      • Common Lisp
      • Various assembly languages
      • Gettext catalogs
      • Squid configuration
      • Debian control files
      • MySQL-style SQL
      • MOOCode

  • Lexers improved:

      • Greatly improved the Haskell and OCaml lexers.
      • Improved the Bash lexer’s handling of nested constructs.
      • The C# and Java lexers exhibited abysmal performance with some
        input code; this should now be fixed.
      • The IRC logs lexer is now able to colorize weechat logs too.
      • The Lua lexer now recognizes multi-line comments.
      • Fixed bugs in the D and MiniD lexers.

  • The encoding handling of the command line mode (pygmentize) was
    enhanced. You shouldn’t get UnicodeErrors from it anymore if you
    don’t give an encoding option.

  • Added a -P option to the command line mode which can be used to
    give options whose values contain commas or equals signs.

  • Added 256-color terminal formatter.

  • Added an experimental SVG formatter.

  • Added the lineanchors option to the HTML formatter, thanks to
    Ian Charnas for the idea.

  • Gave the line numbers table a CSS class in the HTML formatter.

  • Added a Vim 7-like style.

Version 0.8.1

(released Jun 27, 2007)

  • Fixed POD highlighting in the Ruby lexer.

  • Fixed Unicode class and namespace name highlighting in the C# lexer.

  • Fixed Unicode string prefix highlighting in the Python lexer.

  • Fixed a bug in the D and MiniD lexers.

  • Fixed the included MoinMoin parser.

Version 0.8

(codename Maikäfer, released May 30, 2007)

  • Lexers added:

      • Haskell, thanks to Adam Blinkinsop
      • Redcode, thanks to Adam Blinkinsop
      • D, thanks to Kirk McDonald
      • MuPad, thanks to Christopher Creutzig
      • MiniD, thanks to Jarrett Billingsley
      • Vim Script, by Tim Hatch

  • The HTML formatter now has a second line-numbers mode in which it
    will just integrate the numbers in the same <pre> tag as the code.

  • The CSharpLexer is now Unicode-aware, which means that it has an
    option that can be set so that it correctly lexes Unicode
    identifiers allowed by the C# specs.

  • Added a RaiseOnErrorTokenFilter that raises an exception when the
    lexer generates an error token, and a VisibleWhitespaceFilter that
    converts whitespace (spaces, tabs, newlines) into visible characters.

  • Fixed the do_insertions() helper function to yield correct indices.

  • The ReST lexer now automatically highlights source code blocks in
    “.. sourcecode:: language” and “.. code:: language” directive blocks.

  • Improved the default style (thanks to Tiberius Teng). The old
    default is still available as the “emacs” style (which was an alias
    before).

  • The get_style_defs method of HTML formatters now uses the
    cssclass option as the default selector if it was given.

  • Improved the ReST and Bash lexers a bit.

  • Fixed a few bugs in the Makefile and Bash lexers, thanks to Tim Hatch.

  • Fixed a bug in the command line code that disallowed -O options
    when using the -S option.

  • Fixed a bug in the RawTokenFormatter.

Version 0.7.1

(released Feb 15, 2007)

  • Fixed little highlighting bugs in the Python, Java, Scheme and
    Apache Config lexers.

  • Updated the included manpage.

  • Included a built version of the documentation in the source tarball.

Version 0.7

(codename Faschingskrapfn, released Feb 14, 2007)

  • Added a MoinMoin parser that uses Pygments. With it, you get
    Pygments highlighting in Moin Wiki pages.

  • Changed the exception raised if no suitable lexer, formatter etc. is
    found in one of the get_*_by_* functions to a custom exception,
    pygments.util.ClassNotFound. It is, however, a subclass of
    ValueError in order to retain backwards compatibility.

  • Added a -H command line option which can be used to get the
    docstring of a lexer, formatter or filter.

  • Made the handling of lexers and formatters more consistent. The
    aliases and filename patterns of formatters are now attributes on them.

  • Added an OCaml lexer, thanks to Adam Blinkinsop.

  • Made the HTML formatter more flexible, and easily subclassable in
    order to make it easy to implement custom wrappers, e.g. alternate
    line number markup. See the documentation.

  • Added an outencoding option to all formatters, making it possible
    to override the encoding (which is used by lexers and formatters)
    when using the command line interface. Also, if using the terminal
    formatter and the output file is a terminal and has an encoding
    attribute, use it if no encoding is given.

  • Made it possible to just drop style modules into the styles
    subpackage of the Pygments installation.

  • Added a “state” keyword argument to the using helper.

  • Added a commandprefix option to the LatexFormatter which allows
    controlling how the command names are constructed.

  • Added quite a few new lexers, thanks to Tim Hatch:

      • Java Server Pages
      • Windows batch files
      • Trac Wiki markup
      • Python tracebacks
      • ReStructuredText
      • Dylan
      • and the Befunge esoteric programming language (yay!)

  • Added Mako lexers by Ben Bangert.

  • Added “fruity” style, another dark-background, originally vim-based
    theme.

  • Added sources.list lexer by Dennis Kaarsemaker.

  • Added token stream filters, and a pygmentize option to use them.

  • Changed behavior of the in operator for tokens.

  • Added mimetypes for all lexers.

  • Fixed some problems lexing Python strings.

  • Fixed tickets: #167, #178, #179, #180, #185, #201.

Version 0.6

(codename Zimtstern, released Dec 20, 2006)

  • Added option for the HTML formatter to write the CSS to an external
    file in “full document” mode.

  • Added RTF formatter.

  • Added Bash and Apache configuration lexers (thanks to Tim Hatch).

  • Improved guessing methods for various lexers.

  • Added @media support to CSS lexer (thanks to Tim Hatch).

  • Added a Groff lexer (thanks to Tim Hatch).

  • License change to BSD.

  • Added lexers for the Myghty template language.

  • Added a Scheme lexer (thanks to Marek Kubica).

  • Added some functions to iterate over existing lexers, formatters and
    filters.

  • The HtmlFormatter’s get_style_defs() can now take a list as an
    argument to generate CSS with multiple prefixes.

  • Support for guessing input encoding added.

  • Encoding support added: all processing is now done with Unicode
    strings; input and output are converted from and optionally to byte
    strings (see the encoding option of lexers and formatters).

  • Some improvements in the C(++) lexers handling comments and line
    continuations.

Version 0.5.1

(released Oct 30, 2006)

  • Fixed traceback in pygmentize -L (thanks to Piotr Ozarowski).

Version 0.5

(codename PyKleur, released Oct 30, 2006)

  • Initial public release.
\ No newline at end of file
diff --git a/doc/_build/html/docs/cmdline.html b/doc/_build/html/docs/cmdline.html
deleted file mode 100644
index 5f08230..0000000
--- a/doc/_build/html/docs/cmdline.html
+++ /dev/null
@@ -1,282 +0,0 @@
Command Line Interface¶

You can use Pygments from the shell, provided you installed the
pygmentize script:

    $ pygmentize test.py
    print "Hello World"

will print the file test.py to standard output, using the Python lexer
(inferred from the file name extension) and the terminal formatter (because
you didn’t give an explicit formatter name).

If you want HTML output:

    $ pygmentize -f html -l python -o test.html test.py

The -l option explicitly selects a lexer. If you give an input file name
with an extension that Pygments recognizes, you can omit this option.

The -o option gives an output file name. If it is not given, output is
written to stdout.

The -f option selects a formatter (as with -l, it can also be omitted
if an output file name is given and has a supported extension).
If no output file name is given and -f is omitted, the
TerminalFormatter is used.

The above command could therefore also be given as:

    $ pygmentize -o test.html test.py

To create a full HTML document, including line numbers and stylesheet (using
the “emacs” style), highlighting the Python file test.py to test.html:

    $ pygmentize -O full,style=emacs -o test.html test.py

Options and filters

Lexer and formatter options can be given using the -O option:

    $ pygmentize -f html -O style=colorful,linenos=1 -l python test.py

Be sure to enclose the option string in quotes if it contains any special
shell characters, such as spaces or expansion wildcards like *. If an option
expects a list value, separate the list entries with spaces (you’ll have to
quote the option value in this case too, so that the shell doesn’t split it).

Since the -O option argument is split at commas and expects the split values
to be of the form name=value, you can’t give an option value that contains
commas or equals signs. Therefore, an option -P is provided (as of Pygments
0.9) that works like -O but can only pass one option per -P. Its value
can then contain all characters:

    $ pygmentize -P "heading=Pygments, the Python highlighter" ...

Filters are added to the token stream using the -F option:

    $ pygmentize -f html -l pascal -F keywordcase:case=upper main.pas

As you can see, options for the filter are given after a colon. As with -O,
the filter name and options must be one shell word, so there may not be any
spaces around the colon.

Generating styles

Formatters normally don’t output full style information. For example, the
HTML formatter by default only outputs <span> tags with class attributes.
Therefore, there’s a special -S option for generating style definitions.
Usage is as follows:

    $ pygmentize -f html -S colorful -a .syntax

generates a CSS style sheet (because you selected the HTML formatter) for
the “colorful” style, prepending a “.syntax” selector to all style rules.

For an explanation of what -a means for a particular formatter, look at the
arg argument of the formatter’s get_style_defs() method.

Getting lexer names

New in version 1.0.

The -N option guesses a lexer name for a given filename, so that

    $ pygmentize -N setup.py

will print out python. It won’t highlight anything yet. If no specific
lexer is known for that filename, text is printed.

Custom Lexers and Formatters

New in version 2.2.

The -x flag enables custom lexers and formatters to be loaded
from files relative to the current directory. Create a file with a class
named CustomLexer or CustomFormatter, then specify it on the command line:

    $ pygmentize -l your_lexer.py -f your_formatter.py -x

You can also specify the name of your class with a colon:

    $ pygmentize -l your_lexer.py:SomeLexer -x

For more information, see the Pygments documentation on Lexer development.

Getting help

The -L option lists lexers, formatters, along with their short
names and supported file name extensions, styles and filters. If you want to
see only one category, give it as an argument:

    $ pygmentize -L filters

will list only the installed filters.

The -H option will give you detailed information (the same that can be found
in this documentation) about a lexer, formatter or filter. Usage is as
follows:

    $ pygmentize -H formatter html

will print the help for the HTML formatter, while

    $ pygmentize -H lexer python

will print the help for the Python lexer, etc.

A note on encodings

New in version 0.9.

Pygments tries to be smart regarding encodings in the formatting process:

  • If you give an encoding option, it will be used as the input and
    output encoding.

  • If you give an outencoding option, it will override encoding
    as the output encoding.

  • If you give an inencoding option, it will override encoding
    as the input encoding.

  • If you don’t give an encoding and have given an output file, the default
    encoding for lexer and formatter is the terminal encoding or the default
    locale encoding of the system. As a last resort, latin1 is used (which
    will pass through all non-ASCII characters).

  • If you don’t give an encoding and haven’t given an output file (that
    means output is written to the console), the default encoding for lexer
    and formatter is the terminal encoding (sys.stdout.encoding).
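
For instance, to read a latin1-encoded source file and emit UTF-8 HTML
(a sketch combining the inencoding and outencoding options described
above):

    $ pygmentize -f html -O inencoding=latin1,outencoding=utf-8 -o test.html test.py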
\ No newline at end of file
diff --git a/doc/_build/html/docs/filterdevelopment.html b/doc/_build/html/docs/filterdevelopment.html
deleted file mode 100644
index 29e9c97..0000000
--- a/doc/_build/html/docs/filterdevelopment.html
+++ /dev/null
@@ -1,194 +0,0 @@

Write your own filter

New in version 0.7.

Writing your own filters is very easy. All you have to do is subclass
the Filter class and override the filter method. Additionally, a
filter is instantiated with some keyword arguments you can use to
adjust its behavior.

Subclassing Filters

As an example, we write a filter that converts all Name.Function tokens
to normal Name tokens to make the output less colorful.

    from pygments.util import get_bool_opt
    from pygments.token import Name
    from pygments.filter import Filter

    class UncolorFilter(Filter):

        def __init__(self, **options):
            Filter.__init__(self, **options)
            self.class_too = get_bool_opt(options, 'classtoo')

        def filter(self, lexer, stream):
            for ttype, value in stream:
                if ttype is Name.Function or (self.class_too and
                                              ttype is Name.Class):
                    ttype = Name
                yield ttype, value

Some notes on the lexer argument: it can be quite confusing since it doesn’t
need to be a lexer instance. If a filter was added by using the add_filter()
function of lexers, that lexer is registered for the filter. In that case
lexer will refer to the lexer that has registered the filter. It can be used
to access options passed to a lexer. Because it could be None, you always
have to check for that case if you access it.
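
A minimal usage sketch, assuming the UncolorFilter class defined above:

    from pygments.lexers import PythonLexer

    lexer = PythonLexer()
    # attach an instance of our filter; keyword arguments are passed
    # on to the filter's __init__()
    lexer.add_filter(UncolorFilter(classtoo=True))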

Using a decorator

You can also use the simplefilter decorator from the pygments.filter module:

    from pygments.util import get_bool_opt
    from pygments.token import Name
    from pygments.filter import simplefilter


    @simplefilter
    def uncolor(self, lexer, stream, options):
        class_too = get_bool_opt(options, 'classtoo')
        for ttype, value in stream:
            if ttype is Name.Function or (class_too and
                                          ttype is Name.Class):
                ttype = Name
            yield ttype, value

The decorator automatically subclasses an internal filter class and uses the
decorated function as a method for filtering. (That’s why there is a self
argument that you probably won’t end up using in the method.)
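
A decorated filter is used by calling it with its options to produce a
filter instance (a sketch, assuming the uncolor function above and a
lexer object):

    lexer.add_filter(uncolor(classtoo=True))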

-
-
- - -
-
-
-
-
- -
- - - \ No newline at end of file diff --git a/doc/_build/html/docs/filters.html b/doc/_build/html/docs/filters.html deleted file mode 100644 index fd2b579..0000000 --- a/doc/_build/html/docs/filters.html +++ /dev/null @@ -1,324 +0,0 @@ - - - - - - - Filters — Pygments - - - - - - - - - - - - - - - - - - -
- - - - -
-
-
-
- -
-

Filters¶

-
-

New in version 0.7.

-
-

You can filter token streams coming from lexers to improve or annotate the
output. For example, you can highlight special words in comments, convert
keywords to upper or lowercase to enforce a style guide, etc.

To apply a filter, you can use the add_filter() method of a lexer:

    >>> from pygments.lexers import PythonLexer
    >>> l = PythonLexer()
    >>> # add a filter given by a string and options
    >>> l.add_filter('codetagify', case='lower')
    >>> l.filters
    [<pygments.filters.CodeTagFilter object at 0xb785decc>]
    >>> from pygments.filters import KeywordCaseFilter
    >>> # or give an instance
    >>> l.add_filter(KeywordCaseFilter(case='lower'))

The add_filter() method takes keyword arguments which are forwarded to
the constructor of the filter.

To get a list of all registered filters by name, you can use the
get_all_filters() function from the pygments.filters module that returns an
iterable for all known filters.
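
For instance, a quick way to see what is available (output abbreviated;
the exact list depends on your Pygments version):

    >>> from pygments.filters import get_all_filters
    >>> sorted(get_all_filters())
    ['codetagify', 'gobble', 'highlight', 'keywordcase', ...]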

If you want to write your own filter, have a look at Write your own filter.

Builtin Filters

class CodeTagFilter

    Name: codetagify

    Highlight special code tags in comments and docstrings.

    Options accepted:

    codetags : list of strings
        A list of strings that are flagged as code tags. The default is to
        highlight XXX, TODO, BUG and NOTE.
class KeywordCaseFilter

    Name: keywordcase

    Convert keywords to lowercase or uppercase or capitalize them, which
    means first letter uppercase, rest lowercase.

    This can be useful e.g. if you highlight Pascal code and want to adapt
    the code to your styleguide.

    Options accepted:

    case : string
        The casing to convert keywords to. Must be one of 'lower',
        'upper' or 'capitalize'. The default is 'lower'.
class NameHighlightFilter

    Name: highlight

    Highlight a normal Name (and Name.*) token with a different token type.

    Example:

        filter = NameHighlightFilter(
            names=['foo', 'bar', 'baz'],
            tokentype=Name.Function,
        )

    This would highlight the names “foo”, “bar” and “baz”
    as functions. Name.Function is the default token type.

    Options accepted:

    names : list of strings
        A list of names that should be given the different token type.
        There is no default.

    tokentype : TokenType or string
        A token type or a string containing a token type name that is
        used for highlighting the strings in names. The default is
        Name.Function.
class RaiseOnErrorTokenFilter

    Name: raiseonerror

    Raise an exception when the lexer generates an error token.

    Options accepted:

    excclass : Exception class
        The exception class to raise.
        The default is pygments.filters.ErrorToken.

    New in version 0.8.
class VisibleWhitespaceFilter

    Name: whitespace

    Convert tabs, newlines and/or spaces to visible characters.

    Options accepted:

    spaces : string or bool
        If this is a one-character string, spaces will be replaced by this
        string. If it is another true value, spaces will be replaced by ·
        (unicode MIDDLE DOT). If it is a false value, spaces will not be
        replaced. The default is False.

    tabs : string or bool
        The same as for spaces, but the default replacement character is »
        (unicode RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK). The default
        value is False. Note: this will not work if the tabsize option
        for the lexer is nonzero, as tabs will already have been expanded
        then.

    tabsize : int
        If tabs are to be replaced by this filter (see the tabs option),
        this is the total number of characters that a tab should be expanded
        to. The default is 8.

    newlines : string or bool
        The same as for spaces, but the default replacement character is ¶
        (unicode PILCROW SIGN). The default value is False.

    wstokentype : bool
        If true, give whitespace the special Whitespace token type. This
        allows styling the visible whitespace differently (e.g. greyed out),
        but it can disrupt background colors. The default is True.

    New in version 0.8.
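    For example, to make spaces and tabs visible in the output (a sketch
    using the options documented above):

        >>> from pygments.lexers import PythonLexer
        >>> lexer = PythonLexer()
        >>> lexer.add_filter('whitespace', spaces=True, tabs=True)
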
class GobbleFilter

    Name: gobble

    Gobbles source code lines (eats initial characters).

    This filter drops the first n characters off every line of code. This
    may be useful when the source code fed to the lexer is indented by a
    fixed amount of space that isn’t desired in the output.

    Options accepted:

    n : int
        The number of characters to gobble.

    New in version 1.2.
class TokenMergeFilter

    Name: tokenmerge

    Merges consecutive tokens with the same token type in the output
    stream of a lexer.

    New in version 1.2.
\ No newline at end of file
diff --git a/doc/_build/html/docs/formatterdevelopment.html b/doc/_build/html/docs/formatterdevelopment.html
deleted file mode 100644
index 43bbd36..0000000
--- a/doc/_build/html/docs/formatterdevelopment.html
+++ /dev/null
@@ -1,281 +0,0 @@

Write your own formatter

As well as creating your own lexer, writing a new
formatter for Pygments is easy and straightforward.

A formatter is a class that is initialized with some keyword arguments (the
formatter options) and that must provide a format() method.
Additionally, a formatter should provide a get_style_defs() method that
returns the style definitions from the style in a form usable for the
formatter’s output format.
-

Quickstart¶

-

The most basic formatter shipped with Pygments is the NullFormatter. It just -sends the value of a token to the output stream:

-
from pygments.formatter import Formatter
-
-class NullFormatter(Formatter):
-    def format(self, tokensource, outfile):
-        for ttype, value in tokensource:
-            outfile.write(value)
-
-
-

As you can see, the format() method is passed two parameters: tokensource -and outfile. The first is an iterable of (token_type, value) tuples, -the latter a file like object with a write() method.

-

Because the formatter is that basic it doesn’t overwrite the get_style_defs() -method.

-
Styles

Styles aren’t instantiated, but their metaclass provides some class
functions so that you can access the style definitions easily.

Styles are iterable and yield tuples in the form (ttype, d) where ttype
is a token and d is a dict with the following keys:

'color'
    Hexadecimal color value (e.g. 'ff0000' for red) or None if not
    defined.

'bold'
    True if the value should be bold.

'italic'
    True if the value should be italic.

'underline'
    True if the value should be underlined.

'bgcolor'
    Hexadecimal color value for the background (e.g. 'eeeeee' for light
    gray) or None if not defined.

'border'
    Hexadecimal color value for the border (e.g. '0000aa' for a dark
    blue) or None for no border.

Additional keys might appear in the future; formatters should ignore all
keys they don’t support.
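A quick way to inspect these tuples (a sketch; 'default' is one of the
built-in style names):

    from pygments.styles import get_style_by_name

    style = get_style_by_name('default')
    for ttype, d in style:
        if d['color']:
            print(ttype, '#' + d['color'])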

HTML 3.2 Formatter

For a more complex example, let’s implement an HTML 3.2 formatter. We don’t
use CSS but inline markup (<u>, <font>, etc.). Because this isn’t good
style, this formatter isn’t in the standard library ;-)

    from pygments.formatter import Formatter

    class OldHtmlFormatter(Formatter):

        def __init__(self, **options):
            Formatter.__init__(self, **options)

            # create a dict of (start, end) tuples that wrap the
            # value of a token so that we can use it in the format
            # method later
            self.styles = {}

            # we iterate over the `_styles` attribute of a style item
            # that contains the parsed style values.
            for token, style in self.style:
                start = end = ''
                # a style item is a tuple in the following form:
                # colors are readily specified in hex: 'RRGGBB'
                if style['color']:
                    start += '<font color="#%s">' % style['color']
                    end = '</font>' + end
                if style['bold']:
                    start += '<b>'
                    end = '</b>' + end
                if style['italic']:
                    start += '<i>'
                    end = '</i>' + end
                if style['underline']:
                    start += '<u>'
                    end = '</u>' + end
                self.styles[token] = (start, end)

        def format(self, tokensource, outfile):
            # lastval is a string we use for caching
            # because it's possible that a lexer yields a number
            # of consecutive tokens with the same token type.
            # to minimize the size of the generated html markup we
            # try to join the values of same-type tokens here
            lastval = ''
            lasttype = None

            # wrap the whole output with <pre>
            outfile.write('<pre>')

            for ttype, value in tokensource:
                # if the token type doesn't exist in the stylemap
                # we try it with the parent of the token type
                # eg: parent of Token.Literal.String.Double is
                # Token.Literal.String
                while ttype not in self.styles:
                    ttype = ttype.parent
                if ttype == lasttype:
                    # the current token type is the same as the last
                    # iteration; cache it
                    lastval += value
                else:
                    # not the same token as last iteration, but we
                    # have some data in the buffer. wrap it with the
                    # defined style and write it to the output file
                    if lastval:
                        stylebegin, styleend = self.styles[lasttype]
                        outfile.write(stylebegin + lastval + styleend)
                    # set lastval/lasttype to current values
                    lastval = value
                    lasttype = ttype

            # if something is left in the buffer, write it to the
            # output file, then close the opened <pre> tag
            if lastval:
                stylebegin, styleend = self.styles[lasttype]
                outfile.write(stylebegin + lastval + styleend)
            outfile.write('</pre>\n')

The comments should explain it. Again, this formatter doesn’t override the
get_style_defs() method. If we had used CSS classes instead of
inline HTML markup, we would need to generate the CSS first; for that
purpose the get_style_defs() method exists.

Generating Style Definitions

Some formatters like the LatexFormatter and the HtmlFormatter don’t
output inline markup but reference either macros or css classes. Because
the definitions of those are not part of the output, the get_style_defs()
method exists. It is passed one parameter (if it’s used and how it’s used
is up to the formatter) and has to return a string or None.
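
A minimal sketch of what such a method might look like for a CSS-based
formatter (the class-naming scheme here is hypothetical, not the one
Pygments actually uses):

    def get_style_defs(self, arg=''):
        # one CSS rule per token type that defines a color; `arg` is an
        # optional extra selector prepended by the caller
        return '\n'.join(
            '%s .%s { color: #%s }' % (arg, str(ttype).replace('.', '-'),
                                       style['color'])
            for ttype, style in self.style
            if style['color']
        )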
\ No newline at end of file
diff --git a/doc/_build/html/docs/formatters.html b/doc/_build/html/docs/formatters.html
deleted file mode 100644
index 5266a3d..0000000
--- a/doc/_build/html/docs/formatters.html
+++ /dev/null
@@ -1,976 +0,0 @@

Available formatters

This page lists all builtin formatters.

Common options

All formatters support these options:
encoding
    If given, must be an encoding name (such as "utf-8"). This will
    be used to convert the token strings (which are Unicode strings)
    to byte strings in the output (default: None).
    It will also be written in an encoding declaration suitable for the
    document format if the full option is given (e.g. a meta
    content-type directive in HTML or an invocation of the inputenc
    package in LaTeX).

    If this is "" or None, Unicode strings will be written
    to the output file, which most file-like objects do not support.
    For example, pygments.highlight() will return a Unicode string if
    called with no outfile argument and a formatter that has encoding
    set to None because it uses a StringIO.StringIO object that
    supports Unicode arguments to write(). Using a regular file object
    wouldn’t work.

    New in version 0.6.

outencoding
    When using Pygments from the command line, any encoding option given is
    passed to the lexer and the formatter. This is sometimes not desirable,
    for example if you want to set the input encoding to "guess".
    Therefore, outencoding has been introduced, which overrides encoding
    for the formatter if given.

    New in version 0.7.
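
The effect of encoding in the library API, as a quick sketch:

    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import HtmlFormatter

    code = 'print("hi")'
    # encoding=None (the default): highlight() returns a Unicode string
    s = highlight(code, PythonLexer(), HtmlFormatter())
    # with an encoding set, the output is a byte string
    b = highlight(code, PythonLexer(), HtmlFormatter(encoding='utf-8'))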

Formatter classes

All these classes are importable from pygments.formatters.
class BBCodeFormatter

    Short names: bbcode, bb
    Filenames: None

    Format tokens with BBcodes. These formatting codes are used by many
    bulletin boards, so you can highlight your sourcecode with pygments
    before posting it there.

    This formatter has no support for background colors and borders, as
    there are no common BBcode tags for that.

    Some board systems (e.g. phpBB) don’t support colors in their [code]
    tag, so you can’t use the highlighting together with that tag.
    Text in a [code] tag usually is shown with a monospace font (which this
    formatter can do with the monofont option) and no spaces (which you
    need for indentation) are removed.

    Additional options accepted:

    style
        The style to use, can be a string or a Style subclass (default:
        'default').

    codetag
        If set to true, put the output into [code] tags (default: false).

    monofont
        If set to true, add a tag to show the code with a monospace font
        (default: false).
class BmpImageFormatter

    Short names: bmp, bitmap
    Filenames: *.bmp

    Create a bitmap image from source code. This uses the Python Imaging
    Library to generate a pixmap from the source code.

    New in version 1.0.
class GifImageFormatter

    Short names: gif
    Filenames: *.gif

    Create a GIF image from source code. This uses the Python Imaging
    Library to generate a pixmap from the source code.

    New in version 1.0.
class HtmlFormatter

    Short names: html
    Filenames: *.html, *.htm

    Format tokens as HTML 4 <span> tags within a <pre> tag, wrapped
    in a <div> tag. The <div>’s CSS class can be set by the cssclass
    option.

    If the linenos option is set to "table", the <pre> is
    additionally wrapped inside a <table> which has one row and two
    cells: one containing the line numbers and one containing the code.
    Example:

        <div class="highlight" >
        <table><tr>
          <td class="linenos" title="click to toggle"
            onclick="with (this.firstChild.style)
                     { display = (display == '') ? 'none' : '' }">
            <pre>1
            2</pre>
          </td>
          <td class="code">
            <pre><span class="Ke">def </span><span class="NaFu">foo</span>(bar):
              <span class="Ke">pass</span>
            </pre>
          </td>
        </tr></table></div>

    (whitespace added to improve clarity)
Wrapping can be disabled using the nowrap option.

-

A list of lines can be specified using the hl_lines option to make these -lines highlighted (as of Pygments 0.11).

-

With the full option, a complete HTML 4 document is output, including -the style definitions inside a <style> tag, or in a separate file if -the cssfile option is given.

-

When tagsfile is set to the path of a ctags index file, it is used to -generate hyperlinks from names to their definition. You must enable -lineanchors and run ctags with the -n option for this to work. The -python-ctags module from PyPI must be installed to use this feature; -otherwise a RuntimeError will be raised.

-

The get_style_defs(arg=’’) method of a HtmlFormatter returns a string -containing CSS rules for the CSS classes used by the formatter. The -argument arg can be used to specify additional CSS selectors that -are prepended to the classes. A call fmter.get_style_defs(‘td .code’) -would result in the following CSS classes:

-
td .code .kw { font-weight: bold; color: #00FF00 }
-td .code .cm { color: #999999 }
-...
-
-
-

If you have Pygments 0.6 or higher, you can also pass a list or tuple to the -get_style_defs() method to request multiple prefixes for the tokens:

-
formatter.get_style_defs(['div.syntax pre', 'pre.syntax'])
-
-
-

The output would then look like this:

-
div.syntax pre .kw,
-pre.syntax .kw { font-weight: bold; color: #00FF00 }
-div.syntax pre .cm,
-pre.syntax .cm { color: #999999 }
-...
-
-
-
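
    Putting the pieces together (a sketch; the file names are illustrative):

        from pygments import highlight
        from pygments.lexers import PythonLexer
        from pygments.formatters import HtmlFormatter

        formatter = HtmlFormatter(linenos='table', cssclass='syntax')
        # write the stylesheet once...
        with open('style.css', 'w') as f:
            f.write(formatter.get_style_defs('.syntax'))
        # ...and produce markup that references it
        html = highlight('def foo(bar):\n    pass\n', PythonLexer(),
                         formatter)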

    Additional options accepted:

    nowrap
        If set to True, don’t wrap the tokens at all, not even inside a
        <pre> tag. This disables most other options (default: False).

    full
        Tells the formatter to output a “full” document, i.e. a complete
        self-contained document (default: False).

    title
        If full is true, the title that should be used to caption the
        document (default: '').

    style
        The style to use, can be a string or a Style subclass (default:
        'default'). This option has no effect if the cssfile
        and noclobber_cssfile option are given and the file specified in
        cssfile exists.

    noclasses
        If set to true, token <span> tags will not use CSS classes, but
        inline styles. This is not recommended for larger pieces of code
        since it increases output size by quite a bit (default: False).

    classprefix
        Since the token types use relatively short class names, they may
        clash with some of your own class names. In this case you can use
        the classprefix option to give a string to prepend to all
        Pygments-generated CSS class names for token types.
        Note that this option also affects the output of get_style_defs().

    cssclass
        CSS class for the wrapping <div> tag (default: 'highlight').
        If you set this option, the default selector for get_style_defs()
        will be this class.

        New in version 0.9: If you select the 'table' line numbers, the
        wrapping table will have a CSS class of this string plus 'table',
        the default is accordingly 'highlighttable'.

    cssstyles
        Inline CSS styles for the wrapping <div> tag (default: '').

    prestyles
        Inline CSS styles for the <pre> tag (default: '').

        New in version 0.11.

    cssfile
        If the full option is true and this option is given, it must be the
        name of an external file. If the filename does not include an
        absolute path, the file’s path will be assumed to be relative to the
        main output file’s path, if the latter can be found. The stylesheet
        is then written to this file instead of the HTML file.

        New in version 0.6.

    noclobber_cssfile
        If cssfile is given and the specified file exists, the css file
        will not be overwritten. This allows the use of the full option in
        combination with a user specified css file. Default is False.

        New in version 1.1.

    linenos
        If set to 'table', output line numbers as a table with two cells,
        one containing the line numbers, the other the whole code. This is
        copy-and-paste-friendly, but may cause alignment problems with some
        browsers or fonts. If set to 'inline', the line numbers will be
        integrated in the <pre> tag that contains the code (that setting
        is new in Pygments 0.8).

        For compatibility with Pygments 0.7 and earlier, every true value
        except 'inline' means the same as 'table' (in particular, that
        means also True).

        The default value is False, which means no line numbers at all.

        Note: with the default (“table”) line number mechanism, the line
        numbers and code can have different line heights in Internet
        Explorer unless you give the enclosing <pre> tags an explicit
        line-height CSS property (you get the default line spacing with
        line-height: 125%).

    hl_lines
        Specify a list of lines to be highlighted.

        New in version 0.11.

    linenostart
        The line number for the first line (default: 1).

    linenostep
        If set to a number n > 1, only every nth line number is printed.

    linenospecial
        If set to a number n > 0, every nth line number is given the CSS
        class "special" (default: 0).

    nobackground
        If set to True, the formatter won’t output the background color
        for the wrapping element (this automatically defaults to False
        when there is no wrapping element [e.g. no argument for the
        get_syntax_defs method given]) (default: False).

        New in version 0.6.

    lineseparator
        This string is output between lines of code. It defaults to "\n",
        which is enough to break a line inside <pre> tags, but you can
        e.g. set it to "<br>" to get HTML line breaks.

        New in version 0.7.

    lineanchors
        If set to a nonempty string, e.g. foo, the formatter will wrap each
        output line in an anchor tag with a name of foo-linenumber.
        This allows easy linking to certain lines.

        New in version 0.9.

    linespans
        If set to a nonempty string, e.g. foo, the formatter will wrap each
        output line in a span tag with an id of foo-linenumber.
        This allows easy access to lines via javascript.

        New in version 1.6.

    anchorlinenos
        If set to True, will wrap line numbers in <a> tags. Used in
        combination with linenos and lineanchors.

    tagsfile
        If set to the path of a ctags file, wrap names in anchor tags that
        link to their definitions. lineanchors should be used, and the
        tags file should specify line numbers (see the -n option to ctags).

        New in version 1.6.

    tagurlformat
        A string formatting pattern used to generate links to ctags
        definitions. Available variables are %(path)s, %(fname)s and
        %(fext)s. Defaults to an empty string, resulting in just
        #prefix-number links.

        New in version 1.6.

    filename
        A string used to generate a filename when rendering <pre> blocks,
        for example if displaying source code.

        New in version 2.1.

    wrapcode
        Wrap the code inside <pre> blocks using <code>, as recommended
        by the HTML5 specification.

        New in version 2.4.

    Subclassing the HTML formatter

    New in version 0.7.

    The HTML formatter is now built in a way that allows easy subclassing,
    thus customizing the output HTML code. The format() method calls
    self._format_lines(), which returns a generator that yields tuples of
    (1, line), where the 1 indicates that the line is a line of the
    formatted source code.

    If the nowrap option is set, the generator is then iterated over and
    the resulting HTML is output.

    Otherwise, format() calls self.wrap(), which wraps the generator with
    other generators. These may add some HTML code to the one generated by
    _format_lines(), either by modifying the lines generated by the latter,
    then yielding them again with (1, line), and/or by yielding other HTML
    code before or after the lines, with (0, html). The distinction between
    source lines and other code makes it possible to wrap the generator
    multiple times.

    The default wrap() implementation adds a <div> and a <pre> tag.

    A custom HtmlFormatter subclass could look like this:

        class CodeHtmlFormatter(HtmlFormatter):

            def wrap(self, source, outfile):
                return self._wrap_code(source)

            def _wrap_code(self, source):
                yield 0, '<code>'
                for i, t in source:
                    if i == 1:
                        # it's a line of formatted code
                        t += '<br>'
                    yield i, t
                yield 0, '</code>'

    This results in wrapping the formatted lines with a <code> tag, where
    the source lines are broken using <br> tags.

    After calling wrap(), the format() method also adds the “line numbers”
    and/or “full document” wrappers if the respective options are set. Then,
    all HTML yielded by the wrapped generator is output.
class IRCFormatter

    Short names: irc, IRC
    Filenames: None

    Format tokens with IRC color sequences.

    The get_style_defs() method doesn’t do anything special since there is
    no support for common styles.

    Options accepted:

    bg
        Set to "light" or "dark" depending on the terminal’s background
        (default: "light").

    colorscheme
        A dictionary mapping token types to (lightbg, darkbg) color names
        or None (default: None = use builtin colorscheme).

    linenos
        Set to True to have line numbers in the output as well
        (default: False = no line numbers).
class ImageFormatter

    Short names: img, IMG, png
    Filenames: *.png

    Create a PNG image from source code. This uses the Python Imaging
    Library to generate a pixmap from the source code.

    New in version 0.10.

    Additional options accepted:

    image_format
        An image format to output to that is recognised by PIL, these
        include:

          • “PNG” (default)
          • “JPEG”
          • “BMP”
          • “GIF”

    line_pad
        The extra spacing (in pixels) between each line of text.
        Default: 2

    font_name
        The font name to be used as the base font from which others, such
        as bold and italic fonts will be generated. This really should be a
        monospace font to look sane.
        Default: “Courier New” on Windows, “Menlo” on Mac OS, and
        “DejaVu Sans Mono” on *nix.

    font_size
        The font size in points to be used.
        Default: 14

    image_pad
        The padding, in pixels, to be used at each edge of the resulting
        image.
        Default: 10

    line_numbers
        Whether line numbers should be shown: True/False
        Default: True

    line_number_start
        The line number of the first line.
        Default: 1

    line_number_step
        The step used when printing line numbers.
        Default: 1

    line_number_bg
        The background colour (in “#123456” format) of the line number bar,
        or None to use the style background color.
        Default: “#eed”

    line_number_fg
        The text color of the line numbers (in “#123456”-like format).
        Default: “#886”

    line_number_chars
        The number of columns of line numbers allowable in the line number
        margin.
        Default: 2

    line_number_bold
        Whether line numbers will be bold: True/False
        Default: False

    line_number_italic
        Whether line numbers will be italicized: True/False
        Default: False

    line_number_separator
        Whether a line will be drawn between the line number area and the
        source code area: True/False
        Default: True

    line_number_pad
        The horizontal padding (in pixels) between the line number margin
        and the source code area.
        Default: 6

    hl_lines
        Specify a list of lines to be highlighted.
        New in version 1.2.
        Default: empty list

    hl_color
        Specify the color for highlighting lines.
        New in version 1.2.
        Default: highlight color of the selected style
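    Typical use (a sketch; requires the Python Imaging Library / Pillow to
    be installed):

        from pygments import highlight
        from pygments.lexers import PythonLexer
        from pygments.formatters import ImageFormatter

        with open('code.png', 'wb') as f:
            highlight('def foo(): pass', PythonLexer(),
                      ImageFormatter(line_numbers=True), outfile=f)
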
class JpgImageFormatter

    Short names: jpg, jpeg
    Filenames: *.jpg

    Create a JPEG image from source code. This uses the Python Imaging
    Library to generate a pixmap from the source code.

    New in version 1.0.
class LatexFormatter

    Short names: latex, tex
    Filenames: *.tex

    Format tokens as LaTeX code. This needs the fancyvrb and color
    standard packages.

    Without the full option, code is formatted as one Verbatim
    environment, like this:

        \begin{Verbatim}[commandchars=\\\{\}]
        \PY{k}{def }\PY{n+nf}{foo}(\PY{n}{bar}):
            \PY{k}{pass}
        \end{Verbatim}

    The special command used here (\PY) and all the other macros it needs
    are output by the get_style_defs method.

    With the full option, a complete LaTeX document is output, including
    the command definitions in the preamble.

    The get_style_defs() method of a LatexFormatter returns a string
    containing \def commands defining the macros needed inside the
    Verbatim environments.

    Additional options accepted:

    style
        The style to use, can be a string or a Style subclass (default:
        'default').

    full
        Tells the formatter to output a “full” document, i.e. a complete
        self-contained document (default: False).

    title
        If full is true, the title that should be used to caption the
        document (default: '').

    docclass
        If the full option is enabled, this is the document class to use
        (default: 'article').

    preamble
        If the full option is enabled, this can be further preamble
        commands, e.g. \usepackage (default: '').

    linenos
        If set to True, output line numbers (default: False).

    linenostart
        The line number for the first line (default: 1).

    linenostep
        If set to a number n > 1, only every nth line number is printed.

    verboptions
        Additional options given to the Verbatim environment (see the
        fancyvrb docs for possible values) (default: '').

    commandprefix
        The LaTeX commands used to produce colored output are constructed
        using this prefix and some letters (default: 'PY').

        New in version 0.7.

        Changed in version 0.10: The default is now 'PY' instead of 'C'.

    texcomments
        If set to True, enables LaTeX comment lines. That is, LaTeX markup
        in comment tokens is not escaped so that LaTeX can render it
        (default: False).

        New in version 1.2.

    mathescape
        If set to True, enables LaTeX math mode escape in comments. That
        is, '$...$' inside a comment will trigger math mode (default:
        False).

        New in version 1.2.

    escapeinside
        If set to a string of length 2, enables escaping to LaTeX. Text
        delimited by these 2 characters is read as LaTeX code and
        typeset accordingly. It has no effect in string literals. It has
        no effect in comments if texcomments or mathescape is
        set. (default: '').

        New in version 2.0.

    envname
        Allows you to pick an alternative environment name replacing
        Verbatim. The alternate environment still has to support Verbatim’s
        option syntax. (default: 'Verbatim').

        New in version 2.0.
class NullFormatter

    Short names: text, null
    Filenames: *.txt

    Output the text unchanged without any formatting.
class RawTokenFormatter

    Short names: raw, tokens
    Filenames: *.raw

    Format tokens as a raw representation for storing token streams.

    The format is tokentype<TAB>repr(tokenstring)\n. The output can later
    be converted to a token stream with the RawTokenLexer, described in the
    lexer list.

    Only two options are accepted:

    compress
        If set to 'gz' or 'bz2', compress the output with the given
        compression algorithm after encoding (default: '').

    error_color
        If set to a color name, highlight error tokens using that color. If
        set but with no value, defaults to 'red'.

        New in version 0.11.
class RtfFormatter

    Short names: rtf
    Filenames: *.rtf

    Format tokens as RTF markup. This formatter automatically outputs full
    RTF documents with color information and other useful stuff. Perfect for
    Copy and Paste into Microsoft(R) Word(R) documents.

    Please note that encoding and outencoding options are ignored.
    The RTF format is ASCII natively, but handles unicode characters
    correctly thanks to escape sequences.

    New in version 0.6.

    Additional options accepted:

    style
        The style to use, can be a string or a Style subclass (default:
        'default').

    fontface
        The used font family, for example Bitstream Vera Sans. Defaults to
        some generic font which is supposed to have fixed width.

    fontsize
        Size of the font used. Size is specified in half points. The
        default is 24 half-points, giving a size 12 font.

        New in version 2.0.
class SvgFormatter

    Short names: svg
    Filenames: *.svg

    Format tokens as an SVG graphics file. This formatter is still
    experimental. Each line of code is a <text> element with explicit x
    and y coordinates containing <tspan> elements with the individual
    token styles.

    By default, this formatter outputs a full SVG document including
    doctype declaration and the <svg> root element.

    New in version 0.9.

    Additional options accepted:

    nowrap
        Don’t wrap the SVG <text> elements in <svg><g> elements and
        don’t add an XML declaration and a doctype. If true, the
        fontfamily and fontsize options are ignored. Defaults to False.

    fontfamily
        The value to give the wrapping <g> element’s font-family
        attribute, defaults to "monospace".

    fontsize
        The value to give the wrapping <g> element’s font-size
        attribute, defaults to "14px".

    xoffset
        Starting offset in X direction, defaults to 0.

    yoffset
        Starting offset in Y direction, defaults to the font size if it is
        given in pixels, or 20 else. (This is necessary since text
        coordinates refer to the text baseline, not the top edge.)

    ystep
        Offset to add to the Y coordinate for each subsequent line. This
        should roughly be the text size plus 5. It defaults to that value
        if the text size is given in pixels, or 25 else.

    spacehack
        Convert spaces in the source to &#160;, which are non-breaking
        spaces. SVG provides the xml:space attribute to control how
        whitespace inside tags is handled, in theory, the preserve value
        could be used to keep all whitespace as-is. However, many current
        SVG viewers don’t obey that rule, so this option is provided as a
        workaround and defaults to True.
class Terminal256Formatter

Short names
terminal256, console256, 256

Filenames
None

Format tokens with ANSI color sequences, for output in a 256-color terminal
or console. Like in TerminalFormatter, color sequences are terminated at
newlines, so that paging the output works correctly.

The formatter takes colors from a style defined by the style option and
converts them to the nearest ANSI 256-color escape sequences. Bold and
underline attributes from the style are preserved (and displayed).

New in version 0.9.

Changed in version 2.2: If the used style defines foreground colors in the
form #ansi*, then Terminal256Formatter will map these to non-extended
foreground colors. See Terminal Styles for more information.

Changed in version 2.4: The ANSI color names have been updated with names
that are easier to understand and align with the color names of other
projects and terminals. See this table for more information.

Options accepted:

style
The style to use, can be a string or a Style subclass (default: 'default').
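For example, to print a highlighted file in a 256-color terminal (a sketch;
'monokai' is just one of the builtin style names, and example.py is an
arbitrary input file):

import sys

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import Terminal256Formatter

with open('example.py') as f:
    code = f.read()

# Color sequences are terminated at newlines, so piping this through
# a pager such as `less -R` still renders correctly.
sys.stdout.write(highlight(code, PythonLexer(),
                           Terminal256Formatter(style='monokai')))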

class TerminalFormatter

Short names
terminal, console

Filenames
None

Format tokens with ANSI color sequences, for output in a text console.
Color sequences are terminated at newlines, so that paging the output works
correctly.

The get_style_defs() method doesn't do anything special since there is no
support for common styles.

Options accepted:

bg
Set to "light" or "dark" depending on the terminal's background (default:
"light").

colorscheme
A dictionary mapping token types to (lightbg, darkbg) color names, or None
(default: None = use builtin colorscheme).

linenos
Set to True to have line numbers on the terminal output as well (default:
False = no line numbers).
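A short sketch combining the options above (it assumes a dark terminal
background; the snippet is arbitrary):

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter

code = 'for i in range(3):\n    print(i)\n'

# bg selects the builtin colorscheme variant; linenos prepends
# line numbers to the output.
print(highlight(code, PythonLexer(),
                TerminalFormatter(bg='dark', linenos=True)))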

class TerminalTrueColorFormatter

Short names
terminal16m, console16m, 16m

Filenames
None

Format tokens with ANSI color sequences, for output in a true-color terminal
or console. Like in TerminalFormatter, color sequences are terminated at
newlines, so that paging the output works correctly.

New in version 2.1.

Options accepted:

style
The style to use, can be a string or a Style subclass (default: 'default').

class TestcaseFormatter

Short names
testcase

Filenames
None

Format tokens as appropriate for a new testcase.

New in version 2.0.

Pygments documentation

• Starting with Pygments
• Builtin components
• Reference
• Hacking for Pygments
• Hints and tricks
• About Pygments

If you find bugs or have suggestions for the documentation, please submit
them on GitHub <https://github.com/pygments/pygments>.

Using Pygments in various scenarios

Markdown

Since Pygments 0.9, the distribution ships Markdown preprocessor sample code
that uses Pygments to render source code, in
external/markdown-processor.py. You can copy and adapt it to your liking.
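The shipped file is the reference; as a rough idea of the approach, a
hand-rolled preprocessor might look like the following hedged sketch (the
fence syntax and the helper name are illustrative, not what
external/markdown-processor.py actually does):

import re

from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_lexer_by_name

FENCE = re.compile(r'```(\w+)?\n(.*?)```', re.S)

def highlight_fences(markdown_text):
    # Replace each fenced code block with Pygments-rendered HTML
    # before handing the text to the Markdown converter.
    def repl(match):
        lexer = get_lexer_by_name(match.group(1) or 'text')
        return highlight(match.group(2), lexer, HtmlFormatter())
    return FENCE.sub(repl, markdown_text)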

TextMate

Antonio Cangiano has created a Pygments bundle for TextMate that allows
colorizing code via a simple menu option. It can be found here.

Bash completion

The source distribution contains a file external/pygments.bashcomp that sets
up completion for the pygmentize command in bash.

Wrappers for other languages

These libraries provide Pygments highlighting for users of languages other
than Python:


Use Pygments in Java

Thanks to Jython it is possible to use Pygments in Java.

This page is a simple tutorial to get an idea of how this works. You can then
look at the Jython documentation for more advanced uses.

Since version 1.5, Pygments is deployed on Maven Central as a JAR, as is
Jython, which makes it a lot easier to create a Java project.

Here is an example of a Maven pom.xml file for a project running Pygments:

<?xml version="1.0" encoding="UTF-8"?>

<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
                             http://maven.apache.org/maven-v4_0_0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <groupId>example</groupId>
  <artifactId>example</artifactId>
  <version>1.0-SNAPSHOT</version>
  <dependencies>
    <dependency>
      <groupId>org.python</groupId>
      <artifactId>jython-standalone</artifactId>
      <version>2.5.3</version>
    </dependency>
    <dependency>
      <groupId>org.pygments</groupId>
      <artifactId>pygments</artifactId>
      <version>1.5</version>
      <scope>runtime</scope>
    </dependency>
  </dependencies>
</project>

The following Java example:

// requires: import org.python.util.PythonInterpreter;
PythonInterpreter interpreter = new PythonInterpreter();

// Set a variable with the content you want to work with
interpreter.set("code", code);

// Simply use Pygments as you would in Python
interpreter.exec("from pygments import highlight\n"
    + "from pygments.lexers import PythonLexer\n"
    + "from pygments.formatters import HtmlFormatter\n"
    + "\nresult = highlight(code, PythonLexer(), HtmlFormatter())");

// Get the result that has been set in a variable
System.out.println(interpreter.get("result", String.class));

will print something like:

<div class="highlight">
<pre><span class="k">print</span> <span class="s">&quot;Hello World&quot;</span></pre>
</div>

Write your own lexer

If a lexer for your favorite language is missing in the Pygments package, you
can easily write your own and extend Pygments.

All you need can be found inside the pygments.lexer module. As you can read
in the API documentation, a lexer is a class that is initialized with some
keyword arguments (the lexer options) and that provides a
get_tokens_unprocessed() method which is given a string or unicode object
with the data to lex.

The get_tokens_unprocessed() method must return an iterator or iterable
containing tuples in the form (index, token, value). Normally you don't need
to do this since there are base lexers that do most of the work and that you
can subclass.
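For instance, calling the method on a builtin lexer shows the tuple form
directly (a quick sketch using the DiffLexer discussed below):

from pygments.lexers import DiffLexer

for index, tokentype, value in DiffLexer().get_tokens_unprocessed('+new\n-old\n'):
    print(index, tokentype, repr(value))
# 0 Token.Generic.Inserted '+new\n'
# 5 Token.Generic.Deleted '-old\n'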


RegexLexer

The lexer base class used by almost all of Pygments' lexers is the
RegexLexer. This class allows you to define lexing rules in terms of regular
expressions for different states.

States are groups of regular expressions that are matched against the input
string at the current position. If one of these expressions matches, a
corresponding action is performed (such as yielding a token with a specific
type, or changing state), the current position is set to where the last match
ended, and the matching process continues with the first regex of the current
state.

Lexer states are kept on a stack: each time a new state is entered, the new
state is pushed onto the stack. The most basic lexers (like the DiffLexer)
just need one state.

Each state is defined as a list of tuples in the form (regex, action,
new_state) where the last item is optional. In the most basic form, action is
a token type (like Name.Builtin). That means: when regex matches, emit a
token with the match text and that token type, and push new_state on the
state stack. If the new state is '#pop', the topmost state is popped from the
stack instead. To pop more than one state, use '#pop:2' and so on. '#push' is
a synonym for pushing the current state on the stack.

The following example shows the DiffLexer from the builtin lexers. Note that
it contains some additional attributes name, aliases and filenames which
aren't required for a lexer. They are used by the builtin lexer lookup
functions.

from pygments.lexer import RegexLexer
from pygments.token import *

class DiffLexer(RegexLexer):
    name = 'Diff'
    aliases = ['diff']
    filenames = ['*.diff']

    tokens = {
        'root': [
            (r' .*\n', Text),
            (r'\+.*\n', Generic.Inserted),
            (r'-.*\n', Generic.Deleted),
            (r'@.*\n', Generic.Subheading),
            (r'Index.*\n', Generic.Heading),
            (r'=.*\n', Generic.Heading),
            (r'.*\n', Text),
        ]
    }

As you can see, this lexer only uses one state. When the lexer starts
scanning the text, it first checks if the current character is a space. If
this is true it scans everything until newline and returns the data as a
Text token (which is the "no special highlighting" token).

If this rule doesn't match, it checks if the current char is a plus sign.
And so on.

If no rule matches at the current position, the current char is emitted as an
Error token that indicates a lexing error, and the position is increased by
one.


Adding and testing a new lexer

The easiest way to use a new lexer is to use Pygments' support for loading
the lexer from a file relative to your current directory.

First, change the name of your lexer class to CustomLexer:

from pygments.lexer import RegexLexer
from pygments.token import *

class CustomLexer(RegexLexer):
    """All your lexer code goes here!"""

Then you can load the lexer from the command line with the additional
flag -x:

$ pygmentize -l your_lexer_file.py -x

To specify a class name other than CustomLexer, append it with a colon:

$ pygmentize -l your_lexer.py:SomeLexer -x

Or, using the Python API:

# For a lexer named CustomLexer
your_lexer = load_lexer_from_file(filename, **options)

# For a lexer named MyNewLexer
your_named_lexer = load_lexer_from_file(filename, "MyNewLexer", **options)

When loading custom lexers and formatters, be extremely careful to use only
trusted files; Pygments will perform the equivalent of eval on them.

If you only want to use your lexer with the Pygments API, you can import and
instantiate the lexer yourself, then pass it to pygments.highlight().

To prepare your new lexer for inclusion in the Pygments distribution, so that
it will be found when passing filenames or lexer aliases from the command
line, you have to perform the following steps.

First, change into the directory containing the Pygments source code. You
will need to have either an unpacked source tarball, or (preferably) a copy
cloned from GitHub.

$ cd .../pygments-main

Select a matching module under pygments/lexers, or create a new module for
your lexer class.

Next, make sure the lexer is known from outside of the module. All modules in
the pygments.lexers package specify __all__. For example, esoteric.py sets:

__all__ = ['BrainfuckLexer', 'BefungeLexer', ...]

Add the name of your lexer class to this list (or create the list if your
lexer is the only class in the module).

Finally the lexer can be made publicly known by rebuilding the lexer mapping:

$ make mapfiles

To test the new lexer, store an example file with the proper extension in
tests/examplefiles. For example, to test your DiffLexer, add a
tests/examplefiles/example.diff containing a sample diff output.

Now you can use pygmentize to render your example to HTML:

$ ./pygmentize -O full -f html -o /tmp/example.html tests/examplefiles/example.diff

Note that this explicitly calls the pygmentize in the current directory by
preceding it with ./. This ensures your modifications are used. Otherwise a
possibly already installed, unmodified version without your new lexer would
be called from the system search path ($PATH).

To view the result, open /tmp/example.html in your browser.

Once the example renders as expected, you should run the complete test suite:

$ make test

It also tests that your lexer fulfills the lexer API and certain invariants,
such as that the concatenation of all token text is the same as the input
text.


Regex Flags

You can either define regex flags locally in the regex (r'(?x)foo bar') or
globally by adding a flags attribute to your lexer class. If no attribute is
defined, it defaults to re.MULTILINE. For more information about regular
expression flags see the page about regular expressions in the Python
documentation.
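As a small illustration (a hypothetical lexer, not one of the builtins), a
flags attribute applies to every rule in the class:

import re

from pygments.lexer import RegexLexer
from pygments.token import Comment, Text

class BasicCommentLexer(RegexLexer):
    name = 'Example with flags'
    # Replaces the re.MULTILINE default for all rules below.
    flags = re.IGNORECASE | re.MULTILINE

    tokens = {
        'root': [
            (r'rem\b.*?$', Comment),  # matches "REM", "rem", "Rem", ...
            (r'\s+', Text),
            (r'\S+', Text),
        ]
    }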


Scanning multiple tokens at once

So far, the action element in the rule tuple of regex, action and state has
been a single token type. Now we look at the first of several other possible
values.

Here is a more complex lexer that highlights INI files. INI files consist of
sections, comments and key = value pairs:

from pygments.lexer import RegexLexer, bygroups
from pygments.token import *

class IniLexer(RegexLexer):
    name = 'INI'
    aliases = ['ini', 'cfg']
    filenames = ['*.ini', '*.cfg']

    tokens = {
        'root': [
            (r'\s+', Text),
            (r';.*?$', Comment),
            (r'\[.*?\]$', Keyword),
            (r'(.*?)(\s*)(=)(\s*)(.*?)$',
             bygroups(Name.Attribute, Text, Operator, Text, String))
        ]
    }

The lexer first looks for whitespace, comments and section names. Later it
looks for a line that looks like a key, value pair, separated by an '='
sign, and optional whitespace.

The bygroups helper yields each capturing group in the regex with a
different token type. First the Name.Attribute token, then a Text token for
the optional whitespace, after that an Operator token for the equals sign.
Then a Text token for the whitespace again. The rest of the line is returned
as String.

Note that for this to work, every part of the match must be inside a
capturing group (a (...)), and there must not be any nested capturing
groups. If you nevertheless need a group, use a non-capturing group defined
using this syntax: (?:some|words|here) (note the ?: after the beginning
parenthesis).

If you find yourself needing a capturing group inside the regex which
shouldn't be part of the output but is used in the regular expressions for
backreferencing (e.g. r'(<(foo|bar)>)(.*?)(</\2>)'), you can pass None to
the bygroups function and that group will be skipped in the output.


Changing states

Many lexers need multiple states to work as expected. For example, some
languages allow multiline comments to be nested. Since this is a recursive
pattern it's impossible to lex just using regular expressions.

Here is a lexer that recognizes C++ style comments (multi-line with /* */
and single-line with // until end of line):

from pygments.lexer import RegexLexer
from pygments.token import *

class CppCommentLexer(RegexLexer):
    name = 'Example Lexer with states'

    tokens = {
        'root': [
            (r'[^/]+', Text),
            (r'/\*', Comment.Multiline, 'comment'),
            (r'//.*?$', Comment.Singleline),
            (r'/', Text)
        ],
        'comment': [
            (r'[^*/]', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline)
        ]
    }

This lexer starts lexing in the 'root' state. It tries to match as much as
possible until it finds a slash ('/'). If the next character after the slash
is an asterisk ('*'), the RegexLexer sends those two characters to the
output stream marked as Comment.Multiline and continues lexing with the
rules defined in the 'comment' state.

If there wasn't an asterisk after the slash, the RegexLexer checks if it's a
singleline comment (i.e. followed by a second slash). If this also wasn't
the case it must be a single slash, which is not a comment starter (the
separate regex for a single slash must also be given, else the slash would
be marked as an error token).

Inside the 'comment' state, we do the same thing again. Scan until the lexer
finds a star or slash. If it's the opening of a multiline comment, push the
'comment' state on the stack and continue scanning, again in the 'comment'
state. Else, check if it's the end of the multiline comment. If yes, pop one
state from the stack.

Note: If you pop from an empty stack you'll get an IndexError. (There is an
easy way to prevent this from happening: don't '#pop' in the root state.)

If the RegexLexer encounters a newline that is flagged as an error token,
the stack is emptied and the lexer continues scanning in the 'root' state.
This can help produce error-tolerant highlighting for erroneous input, e.g.
when a single-line string is not closed.


Advanced state tricks

There are a few more things you can do with states:

• You can push multiple states onto the stack if you give a tuple instead of
  a simple string as the third item in a rule tuple. For example, if you
  want to match a comment containing a directive, something like:

  /* <processing directive>    rest of comment */

  you can use this rule:

  tokens = {
      'root': [
          (r'/\* <', Comment, ('comment', 'directive')),
          ...
      ],
      'directive': [
          (r'[^>]*', Comment.Directive),
          (r'>', Comment, '#pop'),
      ],
      'comment': [
          (r'[^*]+', Comment),
          (r'\*/', Comment, '#pop'),
          (r'\*', Comment),
      ]
  }

  When this encounters the above sample, first 'comment' and 'directive' are
  pushed onto the stack, then the lexer continues in the directive state
  until it finds the closing >, then it continues in the comment state until
  the closing */. Then, both states are popped from the stack again and
  lexing continues in the root state.

  New in version 0.9: The tuple can contain the special '#push' and '#pop'
  (but not '#pop:n') directives.

• You can include the rules of a state in the definition of another. This is
  done by using include from pygments.lexer:

  from pygments.lexer import RegexLexer, bygroups, include
  from pygments.token import *

  class ExampleLexer(RegexLexer):
      tokens = {
          'comments': [
              (r'/\*.*?\*/', Comment),
              (r'//.*?\n', Comment),
          ],
          'root': [
              include('comments'),
              (r'(function )(\w+)( {)',
               bygroups(Keyword, Name, Keyword), 'function'),
              (r'.', Text),
          ],
          'function': [
              (r'[^}/]+', Text),
              include('comments'),
              (r'/', Text),
              (r'\}', Keyword, '#pop'),
          ]
      }

  This is a hypothetical lexer for a language that consists of functions and
  comments. Because comments can occur at toplevel and in functions, we need
  rules for comments in both states. As you can see, the include helper
  saves repeating rules that occur more than once (in this example, the
  state 'comments' will never be entered by the lexer, as it's only there to
  be included in 'root' and 'function').

• Sometimes, you may want to "combine" a state from existing ones. This is
  possible with the combined helper from pygments.lexer.

  If you, instead of a new state, write combined('state1', 'state2') as the
  third item of a rule tuple, a new anonymous state will be formed from
  state1 and state2 and if the rule matches, the lexer will enter this
  state.

  This is not used very often, but can be helpful in some cases, such as the
  PythonLexer's string literal processing.

• If you want your lexer to start lexing in a different state you can modify
  the stack by overriding the get_tokens_unprocessed() method:

  from pygments.lexer import RegexLexer

  class ExampleLexer(RegexLexer):
      tokens = {...}

      def get_tokens_unprocessed(self, text, stack=('root', 'otherstate')):
          for item in RegexLexer.get_tokens_unprocessed(self, text, stack):
              yield item

  Some lexers like the PhpLexer use this to make the leading <?php
  preprocessor comments optional. Note that you can crash the lexer easily
  by putting values into the stack that don't exist in the token map. Also
  removing 'root' from the stack can result in strange errors!

• In some lexers, a state should be popped if anything is encountered that
  isn't matched by a rule in the state. You could use an empty regex at the
  end of the state list, but Pygments provides a more obvious way of
  spelling that: default('#pop') is equivalent to ('', Text, '#pop'), as
  shown in the sketch after this list.

  New in version 2.0.
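Here is a minimal sketch of default() in such a state (a hypothetical lexer
for illustration only):

from pygments.lexer import RegexLexer, default
from pygments.token import Keyword, Name, Text

class GotoLexer(RegexLexer):
    name = 'Goto Example'

    tokens = {
        'root': [
            (r'goto\b', Keyword, 'label'),
            (r'\w+', Name),
            (r'\s+', Text),
            (r'.', Text),
        ],
        'label': [
            (r'\s+', Text),
            (r'\w+', Name.Label, '#pop'),
            # Leave the state without consuming input, instead of
            # emitting Error tokens, when no label follows.
            default('#pop'),
        ],
    }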


Subclassing lexers derived from RegexLexer

New in version 1.6.

Sometimes multiple languages are very similar, but should still be lexed by
different lexer classes.

When subclassing a lexer derived from RegexLexer, the tokens dictionaries
defined in the parent and child class are merged. For example:

from pygments.lexer import RegexLexer, inherit
from pygments.token import *

class BaseLexer(RegexLexer):
    tokens = {
        'root': [
            ('[a-z]+', Name),
            (r'/\*', Comment, 'comment'),
            ('"', String, 'string'),
            (r'\s+', Text),
        ],
        'string': [
            ('[^"]+', String),
            ('"', String, '#pop'),
        ],
        'comment': [
            ...
        ],
    }

class DerivedLexer(BaseLexer):
    tokens = {
        'root': [
            ('[0-9]+', Number),
            inherit,
        ],
        'string': [
            (r'[^"\\]+', String),
            (r'\\.', String.Escape),
            ('"', String, '#pop'),
        ],
    }

The BaseLexer defines two states, lexing names and strings. The DerivedLexer
defines its own tokens dictionary, which extends the definitions of the base
lexer:

• The "root" state has an additional rule and then the special object
  inherit, which tells Pygments to insert the token definitions of the
  parent class at that point.

• The "string" state is replaced entirely, since there is no inherit rule.

• The "comment" state is inherited entirely.

  • -
-
-
-

Using multiple lexers

Using multiple lexers for the same input can be tricky. One of the easiest
combination techniques is shown here: You can replace the action entry in a
rule tuple with a lexer class. The matched text will then be lexed with that
lexer, and the resulting tokens will be yielded.

For example, look at this stripped-down HTML lexer:

import re

from pygments.lexer import RegexLexer, bygroups, using
from pygments.token import *
from pygments.lexers.javascript import JavascriptLexer

class HtmlLexer(RegexLexer):
    name = 'HTML'
    aliases = ['html']
    filenames = ['*.html', '*.htm']

    flags = re.IGNORECASE | re.DOTALL
    tokens = {
        'root': [
            ('[^<&]+', Text),
            ('&.*?;', Name.Entity),
            (r'<\s*script\s*', Name.Tag, ('script-content', 'tag')),
            (r'<\s*[a-zA-Z0-9:]+', Name.Tag, 'tag'),
            (r'<\s*/\s*[a-zA-Z0-9:]+\s*>', Name.Tag),
        ],
        'script-content': [
            (r'(.+?)(<\s*/\s*script\s*>)',
             bygroups(using(JavascriptLexer), Name.Tag),
             '#pop'),
        ]
    }

Here the content of a <script> tag is passed to a newly created instance of
a JavascriptLexer and not processed by the HtmlLexer. This is done using the
using helper that takes the other lexer class as its parameter.

Note the combination of bygroups and using. This makes sure that the content
up to the </script> end tag is processed by the JavascriptLexer, while the
end tag is yielded as a normal token with the Name.Tag type.

Also note the (r'<\s*script\s*', Name.Tag, ('script-content', 'tag')) rule.
Here, two states are pushed onto the state stack, 'script-content' and
'tag'. That means that first 'tag' is processed, which will lex attributes
and the closing >, then the 'tag' state is popped and the next state on top
of the stack will be 'script-content'.

Since you cannot refer to the class currently being defined, use this
(imported from pygments.lexer) to refer to the current lexer class, i.e.
using(this). This construct may seem unnecessary, but this is often the most
obvious way of lexing arbitrary syntax between fixed delimiters without
introducing deeply nested states.

The using() helper has a special keyword argument, state, which works as
follows: if given, the lexer to use initially is not in the "root" state,
but in the state given by this argument. This does not work with advanced
RegexLexer subclasses such as ExtendedRegexLexer (see below).

Any other keyword arguments passed to using() are added to the keyword
arguments used to create the lexer.


Delegating Lexer

Another approach for nested lexers is the DelegatingLexer, which is for
example used for the template engine lexers. It takes two lexers as
arguments on initialisation: a root_lexer and a language_lexer.

The input is processed as follows: First, the whole text is lexed with the
language_lexer. All tokens yielded with the special type of Other are then
concatenated and given to the root_lexer. The language tokens of the
language_lexer are then inserted into the root_lexer's token stream at the
appropriate positions.

from pygments.lexer import DelegatingLexer
from pygments.lexers.web import HtmlLexer, PhpLexer

class HtmlPhpLexer(DelegatingLexer):
    def __init__(self, **options):
        super(HtmlPhpLexer, self).__init__(HtmlLexer, PhpLexer, **options)

This procedure ensures that e.g. HTML with template tags in it is
highlighted correctly even if the template tags are put into HTML tags or
attributes.

If you want to change the needle token Other to something else, you can give
the lexer another token type as the third parameter:

DelegatingLexer.__init__(MyLexer, OtherLexer, Text, **options)

Callbacks

Sometimes the grammar of a language is so complex that a lexer would be
unable to process it just by using regular expressions and stacks.

For this, the RegexLexer allows callbacks to be given in rule tuples,
instead of token types (bygroups and using are nothing else but
preimplemented callbacks). The callback must be a function taking two
arguments:

• the lexer itself

• the match object for the last matched rule

The callback must then return an iterable of (or simply yield) (index,
tokentype, value) tuples, which are then just passed through by
get_tokens_unprocessed(). The index here is the position of the token in the
input string, tokentype is the normal token type (like Name.Builtin), and
value the associated part of the input string.

You can see an example here:

from pygments.lexer import RegexLexer
from pygments.token import Generic

class HypotheticLexer(RegexLexer):

    def headline_callback(lexer, match):
        equal_signs = match.group(1)
        text = match.group(2)
        yield match.start(), Generic.Headline, equal_signs + text + equal_signs

    tokens = {
        'root': [
            (r'(=+)(.*?)(\1)', headline_callback)
        ]
    }

If the regex for the headline_callback matches, the function is called with
the match object. Note that after the callback is done, processing continues
normally, that is, after the end of the previous match. The callback has no
possibility to influence the position.

There are not really any simple examples for lexer callbacks, but you can
see them in action e.g. in the SMLLexer class in ml.py.


The ExtendedRegexLexer class

The RegexLexer, even with callbacks, unfortunately isn't powerful enough for
the funky syntax rules of languages such as Ruby.

But fear not; even then you don't have to abandon the regular expression
approach: Pygments has a subclass of RegexLexer, the ExtendedRegexLexer. All
features known from RegexLexers are available here too, and the tokens are
specified in exactly the same way, except for one detail:

The get_tokens_unprocessed() method holds its internal state data not as
local variables, but in an instance of the pygments.lexer.LexerContext
class, and that instance is passed to callbacks as a third argument. This
means that you can modify the lexer state in callbacks.

The LexerContext class has the following members:

• text – the input text

• pos – the current starting position that is used for matching regexes

• stack – a list containing the state stack

• end – the maximum position to which regexes are matched; this defaults to
  the length of text

Additionally, the get_tokens_unprocessed() method can be given a
LexerContext instead of a string and will then process this context instead
of creating a new one for the string argument.

Note that because you can set the current position to anything in the
callback, it won't automatically be set by the caller after the callback is
finished. For example, this is how the hypothetical lexer above would be
written with the ExtendedRegexLexer:

from pygments.lexer import ExtendedRegexLexer
from pygments.token import Generic

class ExHypotheticLexer(ExtendedRegexLexer):

    def headline_callback(lexer, match, ctx):
        equal_signs = match.group(1)
        text = match.group(2)
        yield match.start(), Generic.Headline, equal_signs + text + equal_signs
        ctx.pos = match.end()

    tokens = {
        'root': [
            (r'(=+)(.*?)(\1)', headline_callback)
        ]
    }

This might sound confusing (and it can really be). But it is needed, and for
an example look at the Ruby lexer in ruby.py.


Handling Lists of Keywords

For a relatively short list (hundreds) you can construct an optimized
regular expression directly using words() (for longer lists, see the next
section). This function handles a few things for you automatically,
including escaping metacharacters and Python's first-match rather than
longest-match semantics in alternations. Feel free to put the lists
themselves in pygments/lexers/_$lang_builtins.py (see examples there), and
generate them by code if possible.

An example of using words() is something like:

from pygments.lexer import RegexLexer, words
from pygments.token import Name

class MyLexer(RegexLexer):

    tokens = {
        'root': [
            (words(('else', 'elseif'), suffix=r'\b'), Name.Builtin),
            (r'\w+', Name),
        ],
    }

As you can see, you can add prefix and suffix parts to the constructed
regex.


Modifying Token Streams

Some languages ship a lot of builtin functions (for example PHP). The total
amount of those functions differs from system to system because not
everybody has every extension installed. In the case of PHP there are over
3000 builtin functions. That's an incredibly huge number of functions, much
more than you want to put into a regular expression.

But because only Name tokens can be function names, this is solvable by
overriding the get_tokens_unprocessed() method. The following lexer
subclasses the PythonLexer so that it highlights some additional names as
pseudo keywords:

from pygments.lexers.python import PythonLexer
from pygments.token import Name, Keyword

class MyPythonLexer(PythonLexer):
    EXTRA_KEYWORDS = set(('foo', 'bar', 'foobar', 'barfoo', 'spam', 'eggs'))

    def get_tokens_unprocessed(self, text):
        for index, token, value in PythonLexer.get_tokens_unprocessed(self, text):
            if token is Name and value in self.EXTRA_KEYWORDS:
                yield index, Keyword.Pseudo, value
            else:
                yield index, token, value

The PhpLexer and LuaLexer use this method to resolve builtin functions.


Available lexers

This page lists all available builtin lexers and the options they take.

Currently, all lexers support these options:

stripnl
Strip leading and trailing newlines from the input (default: True).

stripall
Strip all leading and trailing whitespace from the input (default: False).

ensurenl
Make sure that the input ends with a newline (default: True). This is
required for some lexers that consume input linewise.

New in version 1.3.

tabsize
If given and greater than 0, expand tabs in the input (default: 0).

encoding
If given, must be an encoding name (such as "utf-8"). This encoding will be
used to convert the input string to Unicode (if it is not already a Unicode
string). The default is "guess".

If this option is set to "guess", a simple UTF-8 vs. Latin-1 detection is
used; if it is set to "chardet", the chardet library is used to guess the
encoding of the input.

New in version 0.6.

The "Short Names" field lists the identifiers that can be used with the
get_lexer_by_name() function.

These lexers are builtin and can be imported from pygments.lexers:
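For example (the option values are arbitrary):

from pygments.lexers import get_lexer_by_name

# Any of the short names listed below works here; lexer options are
# passed as keyword arguments.
lexer = get_lexer_by_name('python', stripall=True, tabsize=4)

for tokentype, value in lexer.get_tokens('if True:\n\tprint("hi")\n'):
    print(tokentype, repr(value))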


Lexers for ActionScript and MXML¶

-
-
-class pygments.lexers.actionscript.ActionScript3Lexer¶
-
-
Short names
-

as3, actionscript3

-
-
Filenames
-

*.as

-
-
MIME types
-

application/x-actionscript3, text/x-actionscript3, text/actionscript3

-
-
-

For ActionScript 3 source code.

-
-

New in version 0.11.

-
-
- -
-
-class pygments.lexers.actionscript.ActionScriptLexer¶
-
-
Short names
-

as, actionscript

-
-
Filenames
-

*.as

-
-
MIME types
-

application/x-actionscript, text/x-actionscript, text/actionscript

-
-
-

For ActionScript source code.

-
-

New in version 0.9.

-
-
- -
-
-class pygments.lexers.actionscript.MxmlLexer¶
-
-
Short names
-

mxml

-
-
Filenames
-

*.mxml

-
-
MIME types
-

None

-
-
-

For MXML markup. Nested AS3 in <script> tags is highlighted by the appropriate lexer.

-
-

New in version 1.1.

-
-
- -
-
-

Lexers for computer algebra systems¶

-
-
-class pygments.lexers.algebra.BCLexer¶
-
-
Short names
-

bc

-
-
Filenames
-

*.bc

-
-
MIME types
-

None

-
-
-

A BC lexer.

-
-

New in version 2.1.

-
-
- -
-
-class pygments.lexers.algebra.GAPLexer¶
-
-
Short names
-

gap

-
-
Filenames
-

*.g, *.gd, *.gi, *.gap

-
-
MIME types
-

None

-
-
-

For GAP source code.

-
-

New in version 2.0.

-
-
- -
-
-class pygments.lexers.algebra.MathematicaLexer¶
-
-
Short names
-

mathematica, mma, nb

-
-
Filenames
-

*.nb, *.cdf, *.nbp, *.ma

-
-
MIME types
-

application/mathematica, application/vnd.wolfram.mathematica, application/vnd.wolfram.mathematica.package, application/vnd.wolfram.cdf

-
-
-

Lexer for Mathematica source code.

-
-

New in version 2.0.

-
-
- -
-
-class pygments.lexers.algebra.MuPADLexer¶
-
-
Short names
-

mupad

-
-
Filenames
-

*.mu

-
-
MIME types
-

None

-
-
-

A MuPAD lexer. Contributed by Christopher Creutzig <christopher@creutzig.de>.

-
-

New in version 0.8.

-
-
- -
-
-

Lexers for AmbientTalk language¶

-
-
-class pygments.lexers.ambient.AmbientTalkLexer¶
-
-
Short names
-

at, ambienttalk, ambienttalk/2

-
-
Filenames
-

*.at

-
-
MIME types
-

text/x-ambienttalk

-
-
-

Lexer for AmbientTalk source code.

-
-

New in version 2.0.

-
-
- -
-
-

Lexers for the AMPL language¶

-
-
-class pygments.lexers.ampl.AmplLexer¶
-
-
Short names
-

ampl

-
-
Filenames
-

*.run

-
-
MIME types
-

None

-
-
-

For AMPL source code.

-
-

New in version 2.2.

-
-
- -
-
-

Lexers for APL¶

-
-
-class pygments.lexers.apl.APLLexer¶
-
-
Short names
-

apl

-
-
Filenames
-

*.apl

-
-
MIME types
-

None

-
-
-

A simple APL lexer.

-
-

New in version 2.0.

-
-
- -
- -
-

Lexers for assembly languages¶

-
-
-class pygments.lexers.asm.CObjdumpLexer¶
-
-
Short names
-

c-objdump

-
-
Filenames
-

*.c-objdump

-
-
MIME types
-

text/x-c-objdump

-
-
-

For the output of ‘objdump -Sr’ on compiled C files.

-
- -
-
-class pygments.lexers.asm.Ca65Lexer¶
-
-
Short names
-

ca65

-
-
Filenames
-

*.s

-
-
MIME types
-

None

-
-
-

For ca65 assembler sources.

-
-

New in version 1.6.

-
-
- -
-
-class pygments.lexers.asm.CppObjdumpLexer¶
-
-
Short names
-

cpp-objdump, c++-objdumb, cxx-objdump

-
-
Filenames
-

*.cpp-objdump, *.c++-objdump, *.cxx-objdump

-
-
MIME types
-

text/x-cpp-objdump

-
-
-

For the output of ‘objdump -Sr’ on compiled C++ files.

-
- -
-
-class pygments.lexers.asm.DObjdumpLexer¶
-
-
Short names
-

d-objdump

-
-
Filenames
-

*.d-objdump

-
-
MIME types
-

text/x-d-objdump

-
-
-

For the output of ‘objdump -Sr’ on compiled D files.

-
- -
-
-class pygments.lexers.asm.Dasm16Lexer¶
-
-
Short names
-

dasm16

-
-
Filenames
-

*.dasm16, *.dasm

-
-
MIME types
-

text/x-dasm16

-
-
-

Simple lexer for DCPU-16 Assembly

-

Check http://0x10c.com/doc/dcpu-16.txt

-
-

New in version 2.4.

-
-
- -
-
-class pygments.lexers.asm.GasLexer¶
-
-
Short names
-

gas, asm

-
-
Filenames
-

*.s, *.S

-
-
MIME types
-

text/x-gas

-
-
-

For Gas (AT&T) assembly code.

-
- -
-
-class pygments.lexers.asm.HsailLexer¶
-
-
Short names
-

hsail, hsa

-
-
Filenames
-

*.hsail

-
-
MIME types
-

text/x-hsail

-
-
-

For HSAIL assembly code.

-
-

New in version 2.2.

-
-
- -
-
-class pygments.lexers.asm.LlvmLexer¶
-
-
Short names
-

llvm

-
-
Filenames
-

*.ll

-
-
MIME types
-

text/x-llvm

-
-
-

For LLVM assembly code.

-
- -
-
-class pygments.lexers.asm.NasmLexer¶
-
-
Short names
-

nasm

-
-
Filenames
-

*.asm, *.ASM

-
-
MIME types
-

text/x-nasm

-
-
-

For Nasm (Intel) assembly code.

-
- -
-
-class pygments.lexers.asm.NasmObjdumpLexer¶
-
-
Short names
-

objdump-nasm

-
-
Filenames
-

*.objdump-intel

-
-
MIME types
-

text/x-nasm-objdump

-
-
-

For the output of ‘objdump -d -M intel’.

-
-

New in version 2.0.

-
-
- -
-
-class pygments.lexers.asm.ObjdumpLexer¶
-
-
Short names
-

objdump

-
-
Filenames
-

*.objdump

-
-
MIME types
-

text/x-objdump

-
-
-

For the output of ‘objdump -dr’

-
- -
-
-class pygments.lexers.asm.TasmLexer¶
-
-
Short names
-

tasm

-
-
Filenames
-

*.asm, *.ASM, *.tasm

-
-
MIME types
-

text/x-tasm

-
-
-

For Tasm (Turbo Assembler) assembly code.

-
- -
-
-

Lexers for automation scripting languages¶

-
-
-class pygments.lexers.automation.AutoItLexer¶
-
-
Short names
-

autoit

-
-
Filenames
-

*.au3

-
-
MIME types
-

text/x-autoit

-
-
-

For AutoIt files.

-

AutoIt is a freeware BASIC-like scripting language designed for automating the Windows GUI and general scripting.

-
-

New in version 1.6.

-
-
- -
-
-class pygments.lexers.automation.AutohotkeyLexer¶
-
-
Short names
-

ahk, autohotkey

-
-
Filenames
-

*.ahk, *.ahkl

-
-
MIME types
-

text/x-autohotkey

-
-
-

For autohotkey source code.

-
-

New in version 1.4.

-
-
- -
-
-

Lexers for BASIC like languages (other than VB.net)¶

-
-
-class pygments.lexers.basic.BBCBasicLexer¶
-
-
Short names
-

bbcbasic

-
-
Filenames
-

*.bbc

-
-
MIME types
-

None

-
-
-

BBC Basic was supplied on the BBC Micro, and later Acorn RISC OS. -It is also used by BBC Basic For Windows.

-
-

New in version 2.4.

-
-
- -
-
-class pygments.lexers.basic.BlitzBasicLexer¶
-
-
Short names
-

blitzbasic, b3d, bplus

-
-
Filenames
-

*.bb, *.decls

-
-
MIME types
-

text/x-bb

-
-
-

For BlitzBasic source code.

-
-

New in version 2.0.

-
-
- -
-
-class pygments.lexers.basic.BlitzMaxLexer¶
-
-
Short names
-

blitzmax, bmax

-
-
Filenames
-

*.bmx

-
-
MIME types
-

text/x-bmx

-
-
-

For BlitzMax source code.

-
-

New in version 1.4.

-
-
- -
-
-class pygments.lexers.basic.CbmBasicV2Lexer¶
-
-
Short names
-

cbmbas

-
-
Filenames
-

*.bas

-
-
MIME types
-

None

-
-
-

For CBM BASIC V2 sources.

-
-

New in version 1.6.

-
-
- -
-
-class pygments.lexers.basic.MonkeyLexer¶
-
-
Short names
-

monkey

-
-
Filenames
-

*.monkey

-
-
MIME types
-

text/x-monkey

-
-
-

For Monkey source code.

-
-

New in version 1.6.

-
-
- -
-
-class pygments.lexers.basic.QBasicLexer¶
-
-
Short names
-

qbasic, basic

-
-
Filenames
-

*.BAS, *.bas

-
-
MIME types
-

text/basic

-
-
-

For QBasic source code.

-
-

New in version 2.0.

-
-
- -
-
-class pygments.lexers.basic.VBScriptLexer¶
-
-
Short names
-

vbscript

-
-
Filenames
-

*.vbs, *.VBS

-
-
MIME types
-

None

-
-
-

VBScript is a scripting language that is modeled on Visual Basic.

-
-

New in version 2.4.

-
-
- -
-
-

Lexers for BibTeX bibliography data and styles¶

-
-
-class pygments.lexers.bibtex.BSTLexer¶
-
-
Short names
-

bst, bst-pybtex

-
-
Filenames
-

*.bst

-
-
MIME types
-

None

-
-
-

A lexer for BibTeX bibliography styles.

-
-

New in version 2.2.

-
-
- -
-
-class pygments.lexers.bibtex.BibTeXLexer¶
-
-
Short names
-

bib, bibtex

-
-
Filenames
-

*.bib

-
-
MIME types
-

text/x-bibtex

-
-
-

A lexer for BibTeX bibliography data format.

-
-

New in version 2.2.

-
-
- -
-
-

Lexers for the Boa language¶

-
-
-class pygments.lexers.boa.BoaLexer¶
-
-
Short names
-

boa

-
-
Filenames
-

*.boa

-
-
MIME types
-

None

-
-
-

Lexer for the Boa language.

-
-

New in version 2.4.

-
-
- -
-
-

Lexers for “business-oriented” languages¶

-
-
-class pygments.lexers.business.ABAPLexer¶
-
-
Short names
-

abap

-
-
Filenames
-

*.abap, *.ABAP

-
-
MIME types
-

text/x-abap

-
-
-

Lexer for ABAP, SAP’s integrated language.

-
-

New in version 1.1.

-
-
- -
-
-class pygments.lexers.business.CobolFreeformatLexer¶
-
-
Short names
-

cobolfree

-
-
Filenames
-

*.cbl, *.CBL

-
-
MIME types
-

None

-
-
-

Lexer for Free format OpenCOBOL code.

-
-

New in version 1.6.

-
-
- -
-
-class pygments.lexers.business.CobolLexer¶
-
-
Short names
-

cobol

-
-
Filenames
-

*.cob, *.COB, *.cpy, *.CPY

-
-
MIME types
-

text/x-cobol

-
-
-

Lexer for OpenCOBOL code.

-
-

New in version 1.6.

-
-
- -
-
-class pygments.lexers.business.GoodDataCLLexer¶
-
-
Short names
-

gooddata-cl

-
-
Filenames
-

*.gdc

-
-
MIME types
-

text/x-gooddata-cl

-
-
-

Lexer for GoodData-CL script files.

-
-

New in version 1.4.

-
-
- -
-
-class pygments.lexers.business.MaqlLexer¶
-
-
Short names
-

maql

-
-
Filenames
-

*.maql

-
-
MIME types
-

text/x-gooddata-maql, application/x-gooddata-maql

-
-
-

Lexer for GoodData MAQL scripts.

-
-

New in version 1.4.

-
-
- -
-
-class pygments.lexers.business.OpenEdgeLexer¶
-
-
Short names
-

openedge, abl, progress

-
-
Filenames
-

*.p, *.cls

-
-
MIME types
-

text/x-openedge, application/x-openedge

-
-
-

Lexer for OpenEdge ABL (formerly Progress) source code.

-
-

New in version 1.5.

-
-
- -
-
-

Lexers for C/C++ languages¶

-
-
-class pygments.lexers.c_cpp.CLexer¶
-
-
Short names
-

c

-
-
Filenames
-

*.c, *.h, *.idc

-
-
MIME types
-

text/x-chdr, text/x-csrc

-
-
-

For C source code with preprocessor directives.

-
- -
-
-class pygments.lexers.c_cpp.CppLexer¶
-
-
Short names
-

cpp, c++

-
-
Filenames
-

*.cpp, *.hpp, *.c++, *.h++, *.cc, *.hh, *.cxx, *.hxx, *.C, *.H, *.cp, *.CPP

-
-
MIME types
-

text/x-c++hdr, text/x-c++src

-
-
-

For C++ source code with preprocessor directives.

-
- -
-
-

Lexers for other C-like languages¶

-
-
-class pygments.lexers.c_like.ArduinoLexer¶
-
-
Short names
-

arduino

-
-
Filenames
-

*.ino

-
-
MIME types
-

text/x-arduino

-
-
-

For Arduino(tm) source.

-

This is an extension of the CppLexer, as the Arduino® Language is a superset of C++.

-
-

New in version 2.1.

-
-
- -
-
-class pygments.lexers.c_like.CharmciLexer¶
-
-
Short names
-

charmci

-
-
Filenames
-

*.ci

-
-
MIME types
-

None

-
-
-

For Charm++ interface files (.ci).

-
-

New in version 2.4.

-
-
- -
-
-class pygments.lexers.c_like.ClayLexer¶
-
-
Short names
-

clay

-
-
Filenames
-

*.clay

-
-
MIME types
-

text/x-clay

-
-
-

For Clay source.

-
-

New in version 2.0.

-
-
- -
-
-class pygments.lexers.c_like.CudaLexer¶
-
-
Short names
-

cuda, cu

-
-
Filenames
-

*.cu, *.cuh

-
-
MIME types
-

text/x-cuda

-
-
-

For NVIDIA CUDA™ source.

-
-

New in version 1.6.

-
-
- -
-
-class pygments.lexers.c_like.ECLexer¶
-
-
Short names
-

ec

-
-
Filenames
-

*.ec, *.eh

-
-
MIME types
-

text/x-echdr, text/x-ecsrc

-
-
-

For eC source code with preprocessor directives.

-
-

New in version 1.5.

-
-
- -
-
-class pygments.lexers.c_like.MqlLexer¶
-
-
Short names
-

mql, mq4, mq5, mql4, mql5

-
-
Filenames
-

*.mq4, *.mq5, *.mqh

-
-
MIME types
-

text/x-mql

-
-
-

For MQL4 and MQL5 source code.

-
-

New in version 2.0.

-
-
- -
-
-class pygments.lexers.c_like.NesCLexer¶
-
-
Short names
-

nesc

-
-
Filenames
-

*.nc

-
-
MIME types
-

text/x-nescsrc

-
-
-

For nesC source code with preprocessor directives.

-
-

New in version 2.0.

-
-
- -
-
-class pygments.lexers.c_like.PikeLexer¶
-
-
Short names
-

pike

-
-
Filenames
-

*.pike, *.pmod

-
-
MIME types
-

text/x-pike

-
-
-

For Pike source code.

-
-

New in version 2.0.

-
-
- -
-
-class pygments.lexers.c_like.SwigLexer¶
-
-
Short names
-

swig

-
-
Filenames
-

*.swg, *.i

-
-
MIME types
-

text/swig

-
-
-

For SWIG source code.

-
-

New in version 2.0.

-
-
- -
-
-class pygments.lexers.c_like.ValaLexer¶
-
-
Short names
-

vala, vapi

-
-
Filenames
-

*.vala, *.vapi

-
-
MIME types
-

text/x-vala

-
-
-

For Vala source code with preprocessor directives.

-
-

New in version 1.1.

-
-
- -
-
-

Lexers for the Cap’n Proto schema language¶

-
-
-class pygments.lexers.capnproto.CapnProtoLexer¶
-
-
Short names
-

capnp

-
-
Filenames
-

*.capnp

-
-
MIME types
-

None

-
-
-

For Cap’n Proto source.

-
-

New in version 2.2.

-
-
- -
-
-

Lexer for the Chapel language¶

-
-
-class pygments.lexers.chapel.ChapelLexer¶
-
-
Short names
-

chapel, chpl

-
-
Filenames
-

*.chpl

-
-
MIME types
-

None

-
-
-

For Chapel source.

-
-

New in version 2.0.

-
-
- -
-
-

Lexer for the Clean language¶

-
-
-class pygments.lexers.clean.CleanLexer¶
-
-
Short names
-

clean

-
-
Filenames
-

*.icl, *.dcl

-
-
MIME types
-

None

-
-
-

Lexer for the general-purpose, state-of-the-art, pure and lazy functional programming language Clean (http://clean.cs.ru.nl/Clean).

-
- -
-
-

Lexers for configuration file formats¶

-
-
-class pygments.lexers.configs.ApacheConfLexer¶
-
-
Short names
-

apacheconf, aconf, apache

-
-
Filenames
-

.htaccess, apache.conf, apache2.conf

-
-
MIME types
-

text/x-apacheconf

-
-
-

Lexer for configuration files following the Apache config file format.

-
-

New in version 0.6.

-
-
- -
-
-class pygments.lexers.configs.AugeasLexer¶
-
-
Short names
-

augeas

-
-
Filenames
-

*.aug

-
-
MIME types
-

None

-
-
-

Lexer for Augeas.

-
-

New in version 2.4.

-
-
- -
-
-class pygments.lexers.configs.Cfengine3Lexer¶
-
-
Short names
-

cfengine3, cf3

-
-
Filenames
-

*.cf

-
-
MIME types
-

None

-
-
-

Lexer for CFEngine3 policy files.

-
-

New in version 1.5.

-
-
- -
-
-class pygments.lexers.configs.DockerLexer¶
-
-
Short names
-

docker, dockerfile

-
-
Filenames
-

Dockerfile, *.docker

-
-
MIME types
-

text/x-dockerfile-config

-
-
-

Lexer for Docker configuration files.

-
-

New in version 2.0.

-
-
- -
-
-class pygments.lexers.configs.IniLexer¶
-
-
Short names
-

ini, cfg, dosini

-
-
Filenames
-

*.ini, *.cfg, *.inf

-
-
MIME types
-

text/x-ini, text/inf

-
-
-

Lexer for configuration files in INI style.

-
- -
-
-class pygments.lexers.configs.KconfigLexer¶
-
-
Short names
-

kconfig, menuconfig, linux-config, kernel-config

-
-
Filenames
-

Kconfig, *Config.in*, external.in*, standard-modules.in

-
-
MIME types
-

text/x-kconfig

-
-
-

For Linux-style Kconfig files.

-
-

New in version 1.6.

-
-
- -
-
-class pygments.lexers.configs.LighttpdConfLexer¶
-
-
Short names
-

lighty, lighttpd

-
-
Filenames
-

None

-
-
MIME types
-

text/x-lighttpd-conf

-
-
-

Lexer for Lighttpd configuration files.

-
-

New in version 0.11.

-
-
- -
-
-class pygments.lexers.configs.NginxConfLexer¶
-
-
Short names
-

nginx

-
-
Filenames
-

nginx.conf

-
-
MIME types
-

text/x-nginx-conf

-
-
-

Lexer for Nginx configuration files.

-
-

New in version 0.11.

-
-
- -
-
-class pygments.lexers.configs.PacmanConfLexer¶
-
-
Short names
-

pacmanconf

-
-
Filenames
-

pacman.conf

-
-
MIME types
-

None

-
-
-

Lexer for pacman.conf.

-

Actually, IniLexer works almost fine for this format, but it yields error tokens, because pacman.conf allows flag directives without an assignment, like:

-
-

UseSyslog
Color
TotalDownload
CheckSpace
VerbosePkgLists

-
-

These are flags to switch on.

-
-

New in version 2.1.

-
-
- -
-
-class pygments.lexers.configs.PkgConfigLexer¶
-
-
Short names
-

pkgconfig

-
-
Filenames
-

*.pc

-
-
MIME types
-

None

-
-
-

Lexer for pkg-config (see also the manual page).

-
-

New in version 2.1.

-
-
- -
-
-class pygments.lexers.configs.PropertiesLexer¶
-
-
Short names
-

properties, jproperties

-
-
Filenames
-

*.properties

-
-
MIME types
-

text/x-java-properties

-
-
-

Lexer for configuration files in Java’s properties format.

-

Note: trailing whitespace counts as part of the value, as per the spec.

-
-

New in version 1.4.

-
-
- -
-
-class pygments.lexers.configs.RegeditLexer¶
-
-
Short names
-

registry

-
-
Filenames
-

*.reg

-
-
MIME types
-

text/x-windows-registry

-
-
-

Lexer for Windows Registry files produced by regedit.

-
-

New in version 1.6.

-
-
- -
-
-class pygments.lexers.configs.SquidConfLexer¶
-
-
Short names
-

squidconf, squid.conf, squid

-
-
Filenames
-

squid.conf

-
-
MIME types
-

text/x-squidconf

-
-
-

Lexer for squid configuration files.

-
-

New in version 0.9.

-
-
- -
-
-class pygments.lexers.configs.TOMLLexer¶
-
-
Short names
-

toml

-
-
Filenames
-

*.toml

-
-
MIME types
-

None

-
-
-

Lexer for TOML, a simple language for config files.

-
-

New in version 2.4.

-
-
- -
-
-class pygments.lexers.configs.TermcapLexer¶
-
-
Short names
-

termcap

-
-
Filenames
-

termcap, termcap.src

-
-
MIME types
-

None

-
-
-

Lexer for termcap database source.

-

This is very simple and minimal.

-
-

New in version 2.1.

-
-
- -
-
-class pygments.lexers.configs.TerminfoLexer¶
-
-
Short names
-

terminfo

-
-
Filenames
-

terminfo, terminfo.src

-
-
MIME types
-

None

-
-
-

Lexer for terminfo database source.

-

This is very simple and minimal.

-
-

New in version 2.1.

-
-
- -
-
-class pygments.lexers.configs.TerraformLexer¶
-
-
Short names
-

terraform, tf

-
-
Filenames
-

*.tf

-
-
MIME types
-

application/x-tf, application/x-terraform

-
-
-

Lexer for Terraform .tf files.

-
-

New in version 2.1.

-
-
- -
-
-

Lexers for misc console output¶

-
-
-class pygments.lexers.console.PyPyLogLexer¶
-
-
Short names
-

pypylog, pypy

-
-
Filenames
-

*.pypylog

-
-
MIME types
-

application/x-pypylog

-
-
-

Lexer for PyPy log files.

-
-

New in version 1.5.

-
-
- -
-
-class pygments.lexers.console.VCTreeStatusLexer¶
-
-
Short names
-

vctreestatus

-
-
Filenames
-

None

-
-
MIME types
-

None

-
-
-

For colorizing the output of version control status commands, like “hg status” or “svn status”.

-
-

New in version 2.0.

-
-
- -
-
-

Lexer for Crystal¶

-
-
-class pygments.lexers.crystal.CrystalLexer¶
-
-
Short names
-

cr, crystal

-
-
Filenames
-

*.cr

-
-
MIME types
-

text/x-crystal

-
-
-

For Crystal source code.

-
-

New in version 2.2.

-
-
- -
-
-

Lexers for Csound languages¶

-
-
-class pygments.lexers.csound.CsoundDocumentLexer¶
-
-
Short names
-

csound-document, csound-csd

-
-
Filenames
-

*.csd

-
-
MIME types
-

None

-
-
-

For Csound documents.

-
-

New in version 2.1.

-
-
- -
-
-class pygments.lexers.csound.CsoundOrchestraLexer¶
-
-
Short names
-

csound, csound-orc

-
-
Filenames
-

*.orc, *.udo

-
-
MIME types
-

None

-
-
-

For Csound orchestras.

-
-

New in version 2.1.

-
-
- -
-
-class pygments.lexers.csound.CsoundScoreLexer¶
-
-
Short names
-

csound-score, csound-sco

-
-
Filenames
-

*.sco

-
-
MIME types
-

None

-
-
-

For Csound scores.

-
-

New in version 2.1.

-
-
- -
- -
-

Lexers for D languages¶

-
-
-class pygments.lexers.d.CrocLexer¶
-
-
Short names
-

croc

-
-
Filenames
-

*.croc

-
-
MIME types
-

text/x-crocsrc

-
-
-

For Croc source.

-
- -
-
-class pygments.lexers.d.DLexer¶
-
-
Short names
-

d

-
-
Filenames
-

*.d, *.di

-
-
MIME types
-

text/x-dsrc

-
-
-

For D source.

-
-

New in version 1.2.

-
-
- -
-
-class pygments.lexers.d.MiniDLexer¶
-
-
Short names
-

minid

-
-
Filenames
-

None

-
-
MIME types
-

text/x-minidsrc

-
-
-

For MiniD source. MiniD is now known as Croc.

-
- -
- -
-

Lexers for data file format¶

-
-
-class pygments.lexers.data.JsonBareObjectLexer¶
-
-
Short names
-

json-object

-
-
Filenames
-

None

-
-
MIME types
-

application/json-object

-
-
-

For JSON data structures (with missing object curly braces).

-
-

New in version 2.2.

-
-
- -
-
-class pygments.lexers.data.JsonLdLexer¶
-
-
Short names
-

jsonld, json-ld

-
-
Filenames
-

*.jsonld

-
-
MIME types
-

application/ld+json

-
-
-

For JSON-LD linked data.

-
-

New in version 2.0.

-
-
- -
-
-class pygments.lexers.data.JsonLexer¶
-
-
Short names
-

json

-
-
Filenames
-

*.json

-
-
MIME types
-

application/json

-
-
-

For JSON data structures.

-
-

New in version 1.5.

-
-
- -
-
-class pygments.lexers.data.YamlLexer¶
-
-
Short names
-

yaml

-
-
Filenames
-

*.yaml, *.yml

-
-
MIME types
-

text/x-yaml

-
-
-

Lexer for YAML, a human-friendly data serialization language.

-
-

New in version 0.11.

-
-
- -
-
-

Lexers for diff/patch formats¶

-
-
-class pygments.lexers.diff.DarcsPatchLexer¶
-
-
Short names
-

dpatch

-
-
Filenames
-

*.dpatch, *.darcspatch

-
-
MIME types
-

None

-
-
-

DarcsPatchLexer is a lexer for the various versions of the darcs patch format. Examples of this format are produced by commands such as darcs annotate --patch and darcs send.

-
-

New in version 0.10.

-
-
- -
-
-class pygments.lexers.diff.DiffLexer¶
-
-
Short names
-

diff, udiff

-
-
Filenames
-

*.diff, *.patch

-
-
MIME types
-

text/x-diff, text/x-patch

-
-
-

Lexer for unified or context-style diffs or patches.

-
- -
-
-class pygments.lexers.diff.WDiffLexer¶
-
-
Short names
-

wdiff

-
-
Filenames
-

*.wdiff

-
-
MIME types
-

None

-
-
-

A wdiff lexer.

-

Note that:

  • it only handles normal wdiff output (without options like -l).

  • if the target files of wdiff contain “[-”, “-]”, “{+”, “+}”, especially
    if they are unbalanced, this lexer may get confused.

  •
-
-

New in version 2.2.

-
-
- -
-
-

Lexers for .net languages¶

-
-
-class pygments.lexers.dotnet.BooLexer¶
-
-
Short names
-

boo

-
-
Filenames
-

*.boo

-
-
MIME types
-

text/x-boo

-
-
-

For Boo source code.

-
- -
-
-class pygments.lexers.dotnet.CSharpAspxLexer¶
-
-
Short names
-

aspx-cs

-
-
Filenames
-

*.aspx, *.asax, *.ascx, *.ashx, *.asmx, *.axd

-
-
MIME types
-

None

-
-
-

Lexer for highlighting C# within ASP.NET pages.

-
- -
-
-class pygments.lexers.dotnet.CSharpLexer¶
-
-
Short names
-

csharp, c#

-
-
Filenames
-

*.cs

-
-
MIME types
-

text/x-csharp

-
-
-

For C# source code.

-

Additional options accepted:

-
-
unicodelevel

Determines which Unicode characters this lexer allows for identifiers. -The possible values are:

-
    -
  • none – only the ASCII letters and numbers are allowed. This -is the fastest selection.

  • -
  • basic – all Unicode characters from the specification except -category Lo are allowed.

  • -
  • full – all Unicode characters as specified in the C# specs -are allowed. Note that this means a considerable slowdown since the -Lo category has more than 40,000 characters in it!

  • -
-

The default value is basic.

-
-

New in version 0.8.

-
-
-
-
- -
-
-class pygments.lexers.dotnet.FSharpLexer¶
-
-
Short names
-

fsharp, f#

-
-
Filenames
-

*.fs, *.fsi

-
-
MIME types
-

text/x-fsharp

-
-
-

For the F# language (version 3.0).

-
-

New in version 1.5.

-
-
- -
-
-class pygments.lexers.dotnet.NemerleLexer¶
-
-
Short names
-

nemerle

-
-
Filenames
-

*.n

-
-
MIME types
-

text/x-nemerle

-
-
-

For Nemerle source code.

-

Additional options accepted:

-
-
unicodelevel

Determines which Unicode characters this lexer allows for identifiers. -The possible values are:

-
    -
  • none – only the ASCII letters and numbers are allowed. This is the fastest selection.

  • -
  • basic – all Unicode characters from the specification except category Lo are allowed.

  • -
  • full – all Unicode characters as specified in the C# specs are allowed. Note that this means a considerable slowdown since the Lo category has more than 40,000 characters in it!

  • -
-

The default value is basic.

-
-
-
-

New in version 1.5.

-
-
- -
-
-class pygments.lexers.dotnet.VbNetAspxLexer¶
-
-
Short names
-

aspx-vb

-
-
Filenames
-

*.aspx, *.asax, *.ascx, *.ashx, *.asmx, *.axd

-
-
MIME types
-

None

-
-
-

Lexer for highlighting Visual Basic.net within ASP.NET pages.

-
- -
-
-class pygments.lexers.dotnet.VbNetLexer¶
-
-
Short names
-

vb.net, vbnet

-
-
Filenames
-

*.vb, *.bas

-
-
MIME types
-

text/x-vbnet, text/x-vba

-
-
-

For -Visual Basic.NET -source code.

-
- -
-
-

Lexers for various domain-specific languages¶

-
-
-class pygments.lexers.dsls.AlloyLexer¶
-
-
Short names
-

alloy

-
-
Filenames
-

*.als

-
-
MIME types
-

text/x-alloy

-
-
-

For Alloy source code.

-
-

New in version 2.0.

-
-
- -
-
-class pygments.lexers.dsls.CrmshLexer¶
-
-
Short names
-

crmsh, pcmk

-
-
Filenames
-

*.crmsh, *.pcmk

-
-
MIME types
-

None

-
-
-

Lexer for crmsh configuration files -for Pacemaker clusters.

-
-

New in version 2.1.

-
-
- -
-
-class pygments.lexers.dsls.FlatlineLexer¶
-
-
Short names
-

flatline

-
-
Filenames
-

None

-
-
MIME types
-

text/x-flatline

-
-
-

Lexer for Flatline expressions.

-
-

New in version 2.2.

-
-
- -
-
-class pygments.lexers.dsls.MscgenLexer¶
-
-
Short names
-

mscgen, msc

-
-
Filenames
-

*.msc

-
-
MIME types
-

None

-
-
-

For Mscgen files.

-
-

New in version 1.6.

-
-
- -
-
-class pygments.lexers.dsls.PanLexer¶
-
-
Short names
-

pan

-
-
Filenames
-

*.pan

-
-
MIME types
-

None

-
-
-

Lexer for pan source files.

-

Based on tcsh lexer.

-
-

New in version 2.0.

-
-
- -
-
-class pygments.lexers.dsls.ProtoBufLexer¶
-
-
Short names
-

protobuf, proto

-
-
Filenames
-

*.proto

-
-
MIME types
-

None

-
-
-

Lexer for Protocol Buffer -definition files.

-
-

New in version 1.4.

-
-
- -
-
-class pygments.lexers.dsls.PuppetLexer¶
-
-
Short names
-

puppet

-
-
Filenames
-

*.pp

-
-
MIME types
-

None

-
-
-

For Puppet configuration DSL.

-
-

New in version 1.6.

-
-
- -
-
-class pygments.lexers.dsls.RslLexer¶
-
-
Short names
-

rsl

-
-
Filenames
-

*.rsl

-
-
MIME types
-

text/rsl

-
-
-

RSL is the formal specification language used in the RAISE (Rigorous Approach to Industrial Software Engineering) method.

-
-

New in version 2.0.

-
-
- -
-
-class pygments.lexers.dsls.SnowballLexer¶
-
-
Short names
-

snowball

-
-
Filenames
-

*.sbl

-
-
MIME types
-

None

-
-
-

Lexer for Snowball source code.

-
-

New in version 2.2.

-
-
- -
-
-class pygments.lexers.dsls.ThriftLexer¶
-
-
Short names
-

thrift

-
-
Filenames
-

*.thrift

-
-
MIME types
-

application/x-thrift

-
-
-

For Thrift interface definitions.

-
-

New in version 2.1.

-
-
- -
-
-class pygments.lexers.dsls.VGLLexer¶
-
-
Short names
-

vgl

-
-
Filenames
-

*.rpf

-
-
MIME types
-

None

-
-
-

For SampleManager VGL -source code.

-
-

New in version 1.6.

-
-
- -
-
-class pygments.lexers.dsls.ZeekLexer¶
-
-
Short names
-

zeek, bro

-
-
Filenames
-

*.zeek, *.bro

-
-
MIME types
-

None

-
-
-

For Zeek scripts.

-
-

New in version 2.5.

-
-
- -
-
-

Lexers for the Dylan language¶

-
-
-class pygments.lexers.dylan.DylanConsoleLexer¶
-
-
Short names
-

dylan-console, dylan-repl

-
-
Filenames
-

*.dylan-console

-
-
MIME types
-

text/x-dylan-console

-
-
-

For Dylan interactive console output like:

-
? let a = 1;
=> 1
? a
=> 1
-
-

This is based on a copy of the RubyConsoleLexer.

-
-

New in version 1.6.

-
-
- -
-
-class pygments.lexers.dylan.DylanLexer¶
-
-
Short names
-

dylan

-
-
Filenames
-

*.dylan, *.dyl, *.intr

-
-
MIME types
-

text/x-dylan

-
-
-

For the Dylan language.

-
-

New in version 0.7.

-
-
- -
-
-class pygments.lexers.dylan.DylanLidLexer¶
-
-
Short names
-

dylan-lid, lid

-
-
Filenames
-

*.lid, *.hdp

-
-
MIME types
-

text/x-dylan-lid

-
-
-

For Dylan LID (Library Interchange Definition) files.

-
-

New in version 1.6.

-
-
- -
-
-

Lexers for the ECL language¶

-
-
-class pygments.lexers.ecl.ECLLexer¶
-
-
Short names
-

ecl

-
-
Filenames
-

*.ecl

-
-
MIME types
-

application/x-ecl

-
-
-

Lexer for the declarative big-data ECL language.

-
-

New in version 1.5.

-
-
- -
-
-

Lexer for the Eiffel language¶

-
-
-class pygments.lexers.eiffel.EiffelLexer¶
-
-
Short names
-

eiffel

-
-
Filenames
-

*.e

-
-
MIME types
-

text/x-eiffel

-
-
-

For Eiffel source code.

-
-

New in version 2.0.

-
-
- -
-
-

Lexer for the Elm programming language¶

-
-
-class pygments.lexers.elm.ElmLexer¶
-
-
Short names
-

elm

-
-
Filenames
-

*.elm

-
-
MIME types
-

text/x-elm

-
-
-

For Elm source code.

-
-

New in version 2.1.

-
-
- -
-
-

Lexer for raw E-mail¶

-
-
-class pygments.lexers.email.EmailLexer¶
-
-
Short names
-

email, eml

-
-
Filenames
-

*.eml

-
-
MIME types
-

message/rfc822

-
-
-

Lexer for raw E-mail.

-

Additional options accepted:

-
-
highlight-X-header

Highlight the fields of user-defined X- email headers (default: False).

-
-
-
-

New in version 2.5.
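A minimal sketch of passing this option; because the option name contains hyphens, it must be supplied via dictionary unpacking rather than as a keyword argument (the message text is made up):

# Minimal sketch: enabling highlighting of user-defined X- headers.
from pygments import highlight
from pygments.lexers.email import EmailLexer
from pygments.formatters import TerminalFormatter

message = (
    "From: alice@example.org\n"
    "X-Mailer: ExampleMailer 1.0\n"
    "Subject: hello\n"
    "\n"
    "Hi Bob,\n"
)

lexer = EmailLexer(**{"highlight-X-header": True})
print(highlight(message, lexer, TerminalFormatter()))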

-
-
- -
-
-

Lexers for Erlang¶

-
-
-class pygments.lexers.erlang.ElixirConsoleLexer¶
-
-
Short names
-

iex

-
-
Filenames
-

None

-
-
MIME types
-

text/x-elixir-shellsession

-
-
-

For Elixir interactive console (iex) output like:

-
iex> [head | tail] = [1,2,3]
[1,2,3]
iex> head
1
iex> tail
[2,3]
iex> [head | tail]
[1,2,3]
iex> length [head | tail]
3
-
-
-

New in version 1.5.

-
-
- -
-
-class pygments.lexers.erlang.ElixirLexer¶
-
-
Short names
-

elixir, ex, exs

-
-
Filenames
-

*.ex, *.exs

-
-
MIME types
-

text/x-elixir

-
-
-

For the Elixir language.

-
-

New in version 1.5.

-
-
- -
-
-class pygments.lexers.erlang.ErlangLexer¶
-
-
Short names
-

erlang

-
-
Filenames
-

*.erl, *.hrl, *.es, *.escript

-
-
MIME types
-

text/x-erlang

-
-
-

For the Erlang functional programming language.

-

Blame Jeremy Thurgood (http://jerith.za.net/).

-
-

New in version 0.9.

-
-
- -
-
-class pygments.lexers.erlang.ErlangShellLexer¶
-
-
Short names
-

erl

-
-
Filenames
-

*.erl-sh

-
-
MIME types
-

text/x-erl-shellsession

-
-
-

Shell sessions in erl (for Erlang code).

-
-

New in version 1.1.

-
-
- -
-
-

Lexers for esoteric languages¶

-
-
-class pygments.lexers.esoteric.AheuiLexer¶
-
-
Short names
-

aheui

-
-
Filenames
-

*.aheui

-
-
MIME types
-

None

-
-
-

Aheui Lexer.

-

Aheui is an esoteric language based on the Korean alphabet.

-
- -
-
-class pygments.lexers.esoteric.BefungeLexer¶
-
-
Short names
-

befunge

-
-
Filenames
-

*.befunge

-
-
MIME types
-

application/x-befunge

-
-
-

Lexer for the esoteric Befunge language.

-
-

New in version 0.7.

-
-
- -
-
-class pygments.lexers.esoteric.BrainfuckLexer¶
-
-
Short names
-

brainfuck, bf

-
-
Filenames
-

*.bf, *.b

-
-
MIME types
-

application/x-brainfuck

-
-
-

Lexer for the esoteric BrainFuck language.

-
- -
-
-class pygments.lexers.esoteric.CAmkESLexer¶
-
-
Short names
-

camkes, idl4

-
-
Filenames
-

*.camkes, *.idl4

-
-
MIME types
-

None

-
-
-

Basic lexer for the input language for the CAmkES component platform.

-
-

New in version 2.1.

-
-
- -
-
-class pygments.lexers.esoteric.CapDLLexer¶
-
-
Short names
-

capdl

-
-
Filenames
-

*.cdl

-
-
MIME types
-

None

-
-
-

Basic lexer for CapDL.

-

The source of the primary tool that reads such specifications is available at https://github.com/seL4/capdl/tree/master/capDL-tool. Note that this lexer only supports a subset of the grammar. For example, identifiers can shadow type names, but these instances are currently incorrectly highlighted as types. Supporting this would need a stateful lexer that is considered unnecessarily complex for now.

-
-

New in version 2.2.

-
-
- -
-
-class pygments.lexers.esoteric.RedcodeLexer¶
-
-
Short names
-

redcode

-
-
Filenames
-

*.cw

-
-
MIME types
-

None

-
-
-

A simple Redcode lexer based on ICWS’94. Contributed by Adam Blinkinsop <blinks@acm.org>.

-
-

New in version 0.8.

-
-
- -
-
-

Pygments lexers for Ezhil language¶

-
-
-class pygments.lexers.ezhil.EzhilLexer¶
-
-
Short names
-

ezhil

-
-
Filenames
-

*.n

-
-
MIME types
-

text/x-ezhil

-
-
-

Lexer for Ezhil, a Tamil script-based programming language.

-
-

New in version 2.1.

-
-
- -
-
-

Lexers for the Factor language¶

-
-
-class pygments.lexers.factor.FactorLexer¶
-
-
Short names
-

factor

-
-
Filenames
-

*.factor

-
-
MIME types
-

text/x-factor

-
-
-

Lexer for the Factor language.

-
-

New in version 1.4.

-
-
- -
-
-

Lexer for the Fantom language¶

-
-
-class pygments.lexers.fantom.FantomLexer¶
-
-
Short names
-

fan

-
-
Filenames
-

*.fan

-
-
MIME types
-

application/x-fantom

-
-
-

For Fantom source code.

-
-

New in version 1.5.

-
-
- -
-
-

Lexer for the Felix language¶

-
-
-class pygments.lexers.felix.FelixLexer¶
-
-
Short names
-

felix, flx

-
-
Filenames
-

*.flx, *.flxh

-
-
MIME types
-

text/x-felix

-
-
-

For Felix source code.

-
-

New in version 1.2.

-
-
- -
-
-

Lexer for FloScript¶

-
-
-class pygments.lexers.floscript.FloScriptLexer¶
-
-
Short names
-

floscript, flo

-
-
Filenames
-

*.flo

-
-
MIME types
-

None

-
-
-

For FloScript configuration language source code.

-
-

New in version 2.4.

-
-
- -
-
-

Lexer for the Forth language¶

-
-
-class pygments.lexers.forth.ForthLexer¶
-
-
Short names
-

forth

-
-
Filenames
-

*.frt, *.fs

-
-
MIME types
-

application/x-forth

-
-
-

Lexer for Forth files.

-
-

New in version 2.2.

-
-
- -
-
-

Lexers for Fortran languages¶

-
-
-class pygments.lexers.fortran.FortranFixedLexer¶
-
-
Short names
-

fortranfixed

-
-
Filenames
-

*.f, *.F

-
-
MIME types
-

None

-
-
-

Lexer for fixed format Fortran.

-
-

New in version 2.1.

-
-
- -
-
-class pygments.lexers.fortran.FortranLexer¶
-
-
Short names
-

fortran

-
-
Filenames
-

*.f03, *.f90, *.F03, *.F90

-
-
MIME types
-

text/x-fortran

-
-
-

Lexer for FORTRAN 90 code.

-
-

New in version 0.10.

-
-
- -
-
-

Simple lexer for Microsoft Visual FoxPro source code¶

-
-
-class pygments.lexers.foxpro.FoxProLexer¶
-
-
Short names
-

foxpro, vfp, clipper, xbase

-
-
Filenames
-

*.PRG, *.prg

-
-
MIME types
-

None

-
-
-

Lexer for Microsoft Visual FoxPro language.

-

FoxPro syntax allows all keywords and function names to be shortened to four characters. Shortened forms are not recognized by this lexer.

-
-

New in version 1.6.

-
-
- -
-
-

Lexer for FreeFem++ language¶

-
-
-class pygments.lexers.freefem.FreeFemLexer¶
-
-
Short names
-

freefem

-
-
Filenames
-

*.edp

-
-
MIME types
-

text/x-freefem

-
-
-

For FreeFem++ source.

-

This is an extension of the CppLexer, as the FreeFem language is a superset of C++.

-
-

New in version 2.4.

-
-
- -
-
-

Lexers for the Google Go language¶

-
-
-class pygments.lexers.go.GoLexer¶
-
-
Short names
-

go

-
-
Filenames
-

*.go

-
-
MIME types
-

text/x-gosrc

-
-
-

For Go source.

-
-

New in version 1.2.

-
-
- -
-
-

Lexers for grammar notations like BNF¶

-
-
-class pygments.lexers.grammar_notation.AbnfLexer¶
-
-
Short names
-

abnf

-
-
Filenames
-

*.abnf

-
-
MIME types
-

text/x-abnf

-
-
-

Lexer for IETF RFC 7405 ABNF (which updates RFC 5234) grammars.

-
-

New in version 2.1.

-
-
- -
-
-class pygments.lexers.grammar_notation.BnfLexer¶
-
-
Short names
-

bnf

-
-
Filenames
-

*.bnf

-
-
MIME types
-

text/x-bnf

-
-
-

This lexer is for grammar notations which are similar to the original BNF.

-

In order to maximize the number of notations this lexer can target, the following design decisions were made:

-
    -
  • We don’t distinguish terminal symbols.

  • -
  • We assume that non-terminal symbols are always enclosed in angle brackets.

  • -
  • We assume that non-terminal symbols may include any printable characters except angle brackets and ASCII 0x20 (space). This assumption is for RBNF.

  • -
  • We assume that the target notation does not support comments.

  • -
  • We don’t distinguish any operators or punctuation except ::=.

  • -
-

These decisions may result in rather minimal highlighting, but they keep the lexer applicable to as many BNF-like notations as possible.

-
-

New in version 2.1.

-
-
- -
-
-class pygments.lexers.grammar_notation.JsgfLexer¶
-
-
Short names
-

jsgf

-
-
Filenames
-

*.jsgf

-
-
MIME types
-

application/jsgf, application/x-jsgf, text/jsgf

-
-
-

For JSpeech Grammar Format grammars.

-
-

New in version 2.2.

-
-
- -
-
-

Lexers for graph query languages¶

-
-
-class pygments.lexers.graph.CypherLexer¶
-
-
Short names
-

cypher

-
-
Filenames
-

*.cyp, *.cypher

-
-
MIME types
-

None

-
-
-

For the Cypher Query Language.

-

For the Cypher version in Neo4j 3.3.

-
-

New in version 2.0.

-
-
- -
- - - -
-

Lexers for hardware descriptor languages¶

-
-
-class pygments.lexers.hdl.SystemVerilogLexer¶
-
-
Short names
-

systemverilog, sv

-
-
Filenames
-

*.sv, *.svh

-
-
MIME types
-

text/x-systemverilog

-
-
-

Extends the Verilog lexer to recognise all SystemVerilog keywords from the IEEE 1800-2009 standard.

-
-

New in version 1.5.

-
-
- -
-
-class pygments.lexers.hdl.VerilogLexer¶
-
-
Short names
-

verilog, v

-
-
Filenames
-

*.v

-
-
MIME types
-

text/x-verilog

-
-
-

For verilog source code with preprocessor directives.

-
-

New in version 1.4.

-
-
- -
-
-class pygments.lexers.hdl.VhdlLexer¶
-
-
Short names
-

vhdl

-
-
Filenames
-

*.vhdl, *.vhd

-
-
MIME types
-

text/x-vhdl

-
-
-

For VHDL source code.

-
-

New in version 1.5.

-
-
- -
-
-

Lexers for hexadecimal dumps¶

-
-
-class pygments.lexers.hexdump.HexdumpLexer¶
-
-
Short names
-

hexdump

-
-
Filenames
-

None

-
-
MIME types
-

None

-
-
-

For typical hex dump output formats by the UNIX and GNU/Linux tools hexdump, hd, hexcat, od and xxd, and the DOS tool DEBUG. For example:

-
00000000  7f 45 4c 46 02 01 01 00  00 00 00 00 00 00 00 00  |.ELF............|
00000010  02 00 3e 00 01 00 00 00  c5 48 40 00 00 00 00 00  |..>......H@.....|
-
-

The specific supported formats are the outputs of:

-
    -
  • hexdump FILE

  • -
  • hexdump -C FILE – the canonical format used in the example.

  • -
  • hd FILE – same as hexdump -C FILE.

  • -
  • hexcat FILE

  • -
  • od -t x1z FILE

  • -
  • xxd FILE

  • -
  • DEBUG.EXE FILE.COM and entering d to the prompt.

  • -
-
-

New in version 2.1.
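A minimal sketch of feeding such output to this lexer through the Python API (the dump literal reuses the canonical-format example above):

# Minimal sketch: highlighting canonical `hexdump -C` output.
from pygments import highlight
from pygments.lexers.hexdump import HexdumpLexer
from pygments.formatters import TerminalFormatter

dump = (
    "00000000  7f 45 4c 46 02 01 01 00  00 00 00 00 00 00 00 00  "
    "|.ELF............|\n"
)
print(highlight(dump, HexdumpLexer(), TerminalFormatter()))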

-
-
- -
- -
-

Lexers for IDL¶

-
-
-class pygments.lexers.idl.IDLLexer¶
-
-
Short names
-

idl

-
-
Filenames
-

*.pro

-
-
MIME types
-

text/idl

-
-
-

Pygments Lexer for IDL (Interactive Data Language).

-
-

New in version 1.6.

-
-
- -
-
-

Lexers for Igor Pro¶

-
-
-class pygments.lexers.igor.IgorLexer¶
-
-
Short names
-

igor, igorpro

-
-
Filenames
-

*.ipf

-
-
MIME types
-

text/ipf

-
-
-

Pygments Lexer for Igor Pro procedure files (.ipf). See http://www.wavemetrics.com/ and http://www.igorexchange.com/.

-
-

New in version 2.0.

-
-
- -
- -
-

Lexers for installer/packager DSLs and formats¶

-
-
-class pygments.lexers.installers.DebianControlLexer¶
-
-
Short names
-

control, debcontrol

-
-
Filenames
-

control

-
-
MIME types
-

None

-
-
-

Lexer for Debian control files and apt-cache show <pkg> outputs.

-
-

New in version 0.9.

-
-
- -
-
-class pygments.lexers.installers.NSISLexer¶
-
-
Short names
-

nsis, nsi, nsh

-
-
Filenames
-

*.nsi, *.nsh

-
-
MIME types
-

text/x-nsis

-
-
-

For NSIS scripts.

-
-

New in version 1.6.

-
-
- -
-
-class pygments.lexers.installers.RPMSpecLexer¶
-
-
Short names
-

spec

-
-
Filenames
-

*.spec

-
-
MIME types
-

text/x-rpm-spec

-
-
-

For RPM .spec files.

-
-

New in version 1.6.

-
-
- -
-
-class pygments.lexers.installers.SourcesListLexer¶
-
-
Short names
-

sourceslist, sources.list, debsources

-
-
Filenames
-

sources.list

-
-
MIME types
-

None

-
-
-

Lexer that highlights debian sources.list files.

-
-

New in version 0.7.

-
-
- -
-
-

Lexers for interactive fiction languages¶

-
-
-class pygments.lexers.int_fiction.Inform6Lexer¶
-
-
Short names
-

inform6, i6

-
-
Filenames
-

*.inf

-
-
MIME types
-

None

-
-
-

For Inform 6 source code.

-
-

New in version 2.0.

-
-
- -
-
-class pygments.lexers.int_fiction.Inform6TemplateLexer¶
-
-
Short names
-

i6t

-
-
Filenames
-

*.i6t

-
-
MIME types
-

None

-
-
-

For Inform 6 template code.

-
-

New in version 2.0.

-
-
- -
-
-class pygments.lexers.int_fiction.Inform7Lexer¶
-
-
Short names
-

inform7, i7

-
-
Filenames
-

*.ni, *.i7x

-
-
MIME types
-

None

-
-
-

For Inform 7 source code.

-
-

New in version 2.0.

-
-
- -
-
-class pygments.lexers.int_fiction.Tads3Lexer¶
-
-
Short names
-

tads3

-
-
Filenames
-

*.t

-
-
MIME types
-

None

-
-
-

For TADS 3 source code.

-
- -
-
-

Lexers for the Io language¶

-
-
-class pygments.lexers.iolang.IoLexer¶
-
-
Short names
-

io

-
-
Filenames
-

*.io

-
-
MIME types
-

text/x-iosrc

-
-
-

For Io (a small, prototype-based programming language) source.

-
-

New in version 0.10.

-
-
- -
-
-

Lexer for the J programming language¶

-
-
-class pygments.lexers.j.JLexer¶
-
-
Short names
-

j

-
-
Filenames
-

*.ijs

-
-
MIME types
-

text/x-j

-
-
-

For J source code.

-
-

New in version 2.1.

-
-
- -
- -
-

Lexers for the Julia language¶

-
-
-class pygments.lexers.julia.JuliaConsoleLexer¶
-
-
Short names
-

jlcon

-
-
Filenames
-

None

-
-
MIME types
-

None

-
-
-

For Julia console sessions. Modeled after MatlabSessionLexer.

-
-

New in version 1.6.

-
-
- -
-
-class pygments.lexers.julia.JuliaLexer¶
-
-
Short names
-

julia, jl

-
-
Filenames
-

*.jl

-
-
MIME types
-

text/x-julia, application/x-julia

-
-
-

For Julia source code.

-
-

New in version 1.6.

-
-
- -
-
-

Pygments lexers for JVM languages¶

-
-
-class pygments.lexers.jvm.AspectJLexer¶
-
-
Short names
-

aspectj

-
-
Filenames
-

*.aj

-
-
MIME types
-

text/x-aspectj

-
-
-

For AspectJ source code.

-
-

New in version 1.6.

-
-
- -
-
-class pygments.lexers.jvm.CeylonLexer¶
-
-
Short names
-

ceylon

-
-
Filenames
-

*.ceylon

-
-
MIME types
-

text/x-ceylon

-
-
-

For Ceylon source code.

-
-

New in version 1.6.

-
-
- -
-
-class pygments.lexers.jvm.ClojureLexer¶
-
-
Short names
-

clojure, clj

-
-
Filenames
-

*.clj

-
-
MIME types
-

text/x-clojure, application/x-clojure

-
-
-

Lexer for Clojure source code.

-
-

New in version 0.11.

-
-
- -
-
-class pygments.lexers.jvm.ClojureScriptLexer¶
-
-
Short names
-

clojurescript, cljs

-
-
Filenames
-

*.cljs

-
-
MIME types
-

text/x-clojurescript, application/x-clojurescript

-
-
-

Lexer for ClojureScript -source code.

-
-

New in version 2.0.

-
-
- -
-
-class pygments.lexers.jvm.GoloLexer¶
-
-
Short names
-

golo

-
-
Filenames
-

*.golo

-
-
MIME types
-

None

-
-
-

For Golo source code.

-
-

New in version 2.0.

-
-
- -
-
-class pygments.lexers.jvm.GosuLexer¶
-
-
Short names
-

gosu

-
-
Filenames
-

*.gs, *.gsx, *.gsp, *.vark

-
-
MIME types
-

text/x-gosu

-
-
-

For Gosu source code.

-
-

New in version 1.5.

-
-
- -
-
-class pygments.lexers.jvm.GosuTemplateLexer¶
-
-
Short names
-

gst

-
-
Filenames
-

*.gst

-
-
MIME types
-

text/x-gosu-template

-
-
-

For Gosu templates.

-
-

New in version 1.5.

-
-
- -
-
-class pygments.lexers.jvm.GroovyLexer¶
-
-
Short names
-

groovy

-
-
Filenames
-

*.groovy, *.gradle

-
-
MIME types
-

text/x-groovy

-
-
-

For Groovy source code.

-
-

New in version 1.5.

-
-
- -
-
-class pygments.lexers.jvm.IokeLexer¶
-
-
Short names
-

ioke, ik

-
-
Filenames
-

*.ik

-
-
MIME types
-

text/x-iokesrc

-
-
-

For Ioke (a strongly typed, dynamic, prototype-based programming language) source.

-
-

New in version 1.4.

-
-
- -
-
-class pygments.lexers.jvm.JasminLexer¶
-
-
Short names
-

jasmin, jasminxt

-
-
Filenames
-

*.j

-
-
MIME types
-

None

-
-
-

For Jasmin assembly code.

-
-

New in version 2.0.

-
-
- -
-
-class pygments.lexers.jvm.JavaLexer¶
-
-
Short names
-

java

-
-
Filenames
-

*.java

-
-
MIME types
-

text/x-java

-
-
-

For Java source code.

-
- -
-
-class pygments.lexers.jvm.KotlinLexer¶
-
-
Short names
-

kotlin

-
-
Filenames
-

*.kt

-
-
MIME types
-

text/x-kotlin

-
-
-

For Kotlin source code.

-
-

New in version 1.5.

-
-
- -
-
-class pygments.lexers.jvm.PigLexer¶
-
-
Short names
-

pig

-
-
Filenames
-

*.pig

-
-
MIME types
-

text/x-pig

-
-
-

For Pig Latin source code.

-
-

New in version 2.0.

-
-
- -
-
-class pygments.lexers.jvm.SarlLexer¶
-
-
Short names
-

sarl

-
-
Filenames
-

*.sarl

-
-
MIME types
-

text/x-sarl

-
-
-
- -

For SARL source code.

-

New in version 2.4.

-

-
-
-class pygments.lexers.jvm.ScalaLexer¶
-
-
Short names
-

scala

-
-
Filenames
-

*.scala

-
-
MIME types
-

text/x-scala

-
-
-

For Scala source code.

-
- -
-
-class pygments.lexers.jvm.XtendLexer¶
-
-
Short names
-

xtend

-
-
Filenames
-

*.xtend

-
-
MIME types
-

text/x-xtend

-
-
-

For Xtend source code.

-
-

New in version 1.6.

-
-
- -
-
-

Lexers for Lispy languages¶

-
-
-class pygments.lexers.lisp.CPSALexer¶
-
-
Short names
-

cpsa

-
-
Filenames
-

*.cpsa

-
-
MIME types
-

None

-
-
-

A CPSA lexer based on the CPSA language as of version 2.2.12.

-
-

New in version 2.1.

-
-
- -
-
-class pygments.lexers.lisp.CommonLispLexer¶
-
-
Short names
-

common-lisp, cl, lisp

-
-
Filenames
-

*.cl, *.lisp

-
-
MIME types
-

text/x-common-lisp

-
-
-

A Common Lisp lexer.

-
-

New in version 0.9.

-
-
- -
-
-class pygments.lexers.lisp.EmacsLispLexer¶
-
-
Short names
-

emacs, elisp, emacs-lisp

-
-
Filenames
-

*.el

-
-
MIME types
-

text/x-elisp, application/x-elisp

-
-
-

An ELisp lexer, parsing a stream and outputting the tokens needed to highlight elisp code.

-
-

New in version 2.1.

-
-
- -
-
-class pygments.lexers.lisp.FennelLexer¶
-
-
Short names
-

fennel, fnl

-
-
Filenames
-

*.fnl

-
-
MIME types
-

None

-
-
-

A lexer for the Fennel programming language.

-

Fennel compiles to Lua, so all the Lua builtins are recognized as well as the special forms that are particular to the Fennel compiler.

-
-

New in version 2.3.

-
-
- -
-
-class pygments.lexers.lisp.HyLexer¶
-
-
Short names
-

hylang

-
-
Filenames
-

*.hy

-
-
MIME types
-

text/x-hy, application/x-hy

-
-
-

Lexer for Hy source code.

-
-

New in version 2.0.

-
-
- -
-
-class pygments.lexers.lisp.NewLispLexer¶
-
-
Short names
-

newlisp

-
-
Filenames
-

*.lsp, *.nl, *.kif

-
-
MIME types
-

text/x-newlisp, application/x-newlisp

-
-
-

For newLISP source code (version 10.3.0).

-
-

New in version 1.5.

-
-
- -
-
-class pygments.lexers.lisp.RacketLexer¶
-
-
Short names
-

racket, rkt

-
-
Filenames
-

*.rkt, *.rktd, *.rktl

-
-
MIME types
-

text/x-racket, application/x-racket

-
-
-

Lexer for Racket source code (formerly known as PLT Scheme).

-
-

New in version 1.6.

-
-
- -
-
-class pygments.lexers.lisp.SchemeLexer¶
-
-
Short names
-

scheme, scm

-
-
Filenames
-

*.scm, *.ss

-
-
MIME types
-

text/x-scheme, application/x-scheme

-
-
-

A Scheme lexer, parsing a stream and outputting the tokens needed to highlight Scheme code. This lexer could most probably easily be subclassed to parse other Lisp dialects like Common Lisp, Emacs Lisp or AutoLisp.

-

This parser is checked with pastes from the LISP pastebin at http://paste.lisp.org/ to cover as much syntax as possible.

-

It supports the full Scheme syntax as defined in R5RS.

-
-

New in version 0.6.

-
-
- -
-
-class pygments.lexers.lisp.ShenLexer¶
-
-
Short names
-

shen

-
-
Filenames
-

*.shen

-
-
MIME types
-

text/x-shen, application/x-shen

-
-
-

Lexer for Shen source code.

-
-

New in version 2.1.

-
-
- -
-
-class pygments.lexers.lisp.XtlangLexer¶
-
-
Short names
-

extempore

-
-
Filenames
-

*.xtm

-
-
MIME types
-

None

-
-
-

An xtlang lexer for the Extempore programming environment.

-

This is a mixture of Scheme and xtlang, really. Keyword lists are taken from the Extempore Emacs mode (https://github.com/extemporelang/extempore-emacs-mode).

-
-

New in version 2.2.

-
-
- -
-
-

Lexers for Makefiles and similar¶

-
-
-class pygments.lexers.make.BaseMakefileLexer¶
-
-
Short names
-

basemake

-
-
Filenames
-

None

-
-
MIME types
-

None

-
-
-

Lexer for simple Makefiles (no preprocessing).

-
-

New in version 0.10.

-
-
- -
-
-class pygments.lexers.make.CMakeLexer¶
-
-
Short names
-

cmake

-
-
Filenames
-

*.cmake, CMakeLists.txt

-
-
MIME types
-

text/x-cmake

-
-
-

Lexer for CMake files.

-
-

New in version 1.2.

-
-
- -
-
-class pygments.lexers.make.MakefileLexer¶
-
-
Short names
-

make, makefile, mf, bsdmake

-
-
Filenames
-

*.mak, *.mk, Makefile, makefile, Makefile.*, GNUmakefile

-
-
MIME types
-

text/x-makefile

-
-
-

Lexer for BSD and GNU make extensions (lenient enough to handle both in the same file even).

-

Rewritten in Pygments 0.10.

-
- -
-
-

Lexers for non-HTML markup languages¶

-
-
-class pygments.lexers.markup.BBCodeLexer¶
-
-
Short names
-

bbcode

-
-
Filenames
-

None

-
-
MIME types
-

text/x-bbcode

-
-
-

A lexer that highlights BBCode(-like) syntax.

-
-

New in version 0.6.

-
-
- -
-
-class pygments.lexers.markup.GroffLexer¶
-
-
Short names
-

groff, nroff, man

-
-
Filenames
-

*.[1234567], *.man

-
-
MIME types
-

application/x-troff, text/troff

-
-
-

Lexer for the (g)roff typesetting language, supporting groff extensions. Mainly useful for highlighting manpage sources.

-
-

New in version 0.6.

-
-
- -
-
-class pygments.lexers.markup.MarkdownLexer¶
-
-
Short names
-

md

-
-
Filenames
-

*.md

-
-
MIME types
-

text/x-markdown

-
-
-

For Markdown markup.

-
-

New in version 2.2.

-
-
- -
-
-class pygments.lexers.markup.MoinWikiLexer¶
-
-
Short names
-

trac-wiki, moin

-
-
Filenames
-

None

-
-
MIME types
-

text/x-trac-wiki

-
-
-

For MoinMoin (and Trac) Wiki markup.

-
-

New in version 0.7.

-
-
- -
-
-class pygments.lexers.markup.MozPreprocCssLexer¶
-
-
Short names
-

css+mozpreproc

-
-
Filenames
-

*.css.in

-
-
MIME types
-

None

-
-
-

Subclass of the MozPreprocHashLexer that highlights unlexed data with the CssLexer.

-
-

New in version 2.0.

-
-
- -
-
-class pygments.lexers.markup.MozPreprocHashLexer¶
-
-
Short names
-

mozhashpreproc

-
-
Filenames
-

None

-
-
MIME types
-

None

-
-
-

Lexer for Mozilla Preprocessor files (with ‘#’ as the marker).

-

Other data is left untouched.

-
-

New in version 2.0.

-
-
- -
-
-class pygments.lexers.markup.MozPreprocJavascriptLexer¶
-
-
Short names
-

javascript+mozpreproc

-
-
Filenames
-

*.js.in

-
-
MIME types
-

None

-
-
-

Subclass of the MozPreprocHashLexer that highlights unlexed data with the JavascriptLexer.

-
-

New in version 2.0.

-
-
- -
-
-class pygments.lexers.markup.MozPreprocPercentLexer¶
-
-
Short names
-

mozpercentpreproc

-
-
Filenames
-

None

-
-
MIME types
-

None

-
-
-

Lexer for Mozilla Preprocessor files (with ‘%’ as the marker).

-

Other data is left untouched.

-
-

New in version 2.0.

-
-
- -
-
-class pygments.lexers.markup.MozPreprocXulLexer¶
-
-
Short names
-

xul+mozpreproc

-
-
Filenames
-

*.xul.in

-
-
MIME types
-

None

-
-
-

Subclass of the MozPreprocHashLexer that highlights unlexed data with the XmlLexer.

-
-

New in version 2.0.

-
-
- -
-
-class pygments.lexers.markup.RstLexer¶
-
-
Short names
-

rst, rest, restructuredtext

-
-
Filenames
-

*.rst, *.rest

-
-
MIME types
-

text/x-rst, text/prs.fallenstein.rst

-
-
-

For reStructuredText markup.

-
-

New in version 0.7.

-
-

Additional options accepted:

-
-
handlecodeblocks

Highlight the contents of .. sourcecode:: language, .. code:: language and .. code-block:: language directives with a lexer for the given language (default: True).

-
-

New in version 0.8.
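A minimal sketch of this option in use (the RST snippet is made up):

# Minimal sketch: nested highlighting of code-block directives in RST.
from pygments import highlight
from pygments.lexers.markup import RstLexer
from pygments.formatters import HtmlFormatter

rst = (
    "Example\n"
    "=======\n"
    "\n"
    ".. code-block:: python\n"
    "\n"
    "   print('hello')\n"
)

# handlecodeblocks=True (the default) lexes the Python block with a
# Python lexer; with False it is treated as plain reStructuredText.
print(highlight(rst, RstLexer(handlecodeblocks=True), HtmlFormatter()))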

-
-
-
-
- -
-
-class pygments.lexers.markup.TexLexer¶
-
-
Short names
-

tex, latex

-
-
Filenames
-

*.tex, *.aux, *.toc

-
-
MIME types
-

text/x-tex, text/x-latex

-
-
-

Lexer for the TeX and LaTeX typesetting languages.

-
- -
- -
-

Lexer for Multipurpose Internet Mail Extensions (MIME) data¶

-
-
-class pygments.lexers.mime.MIMELexer¶
-
-
Short names
-

mime

-
-
Filenames
-

None

-
-
MIME types
-

multipart/mixed, multipart/related, multipart/alternative

-
-
-

Lexer for Multipurpose Internet Mail Extensions (MIME) data. This lexer is designed to process nested multipart data.

-

It assumes that the given data contains both a header and a body (separated by an empty line). If no valid header is found, the entire data is treated as the body.

-

Additional options accepted:

-
-
MIME-max-level

Maximum recursion level for nested MIME structures. Any negative number is treated as unlimited (default: -1).

-
-
Content-Type

Treat the data as the given content type. Useful when the header is missing; otherwise, this lexer tries to parse the content type from the header (default: text/plain).

-
-
Multipart-Boundary

Set the default multipart boundary delimiter. This option is only used when the Content-Type is multipart and the header is missing; otherwise, this lexer tries to parse the boundary from the header (default: None).

-
-
Content-Transfer-Encoding

Treat the data as using the given transfer encoding; otherwise, this lexer tries to parse it from the header (default: None).

-
-
-
-

New in version 2.5.
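A minimal sketch of passing these options; since the option names contain hyphens, they must be supplied via dictionary unpacking (the message data is made up):

# Minimal sketch: limiting recursion depth for nested multipart data.
from pygments import highlight
from pygments.lexers.mime import MIMELexer
from pygments.formatters import TerminalFormatter

data = (
    "Content-Type: text/html; charset=utf-8\n"
    "\n"
    "<p>Hello</p>\n"
)

lexer = MIMELexer(**{"MIME-max-level": 2})
print(highlight(data, lexer, TerminalFormatter()))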

-
-
- -
-
-

Lexers for ML family languages¶

-
-
-class pygments.lexers.ml.OcamlLexer¶
-
-
Short names
-

ocaml

-
-
Filenames
-

*.ml, *.mli, *.mll, *.mly

-
-
MIME types
-

text/x-ocaml

-
-
-

For the OCaml language.

-
-

New in version 0.7.

-
-
- -
-
-class pygments.lexers.ml.OpaLexer¶
-
-
Short names
-

opa

-
-
Filenames
-

*.opa

-
-
MIME types
-

text/x-opa

-
-
-

Lexer for the Opa language (http://opalang.org).

-
-

New in version 1.5.

-
-
- -
-
-class pygments.lexers.ml.SMLLexer¶
-
-
Short names
-

sml

-
-
Filenames
-

*.sml, *.sig, *.fun

-
-
MIME types
-

text/x-standardml, application/x-standardml

-
-
-

For the Standard ML language.

-
-

New in version 1.5.

-
-
- -
-
-

Lexers for modeling languages¶

-
-
-class pygments.lexers.modeling.BugsLexer¶
-
-
Short names
-

bugs, winbugs, openbugs

-
-
Filenames
-

*.bug

-
-
MIME types
-

None

-
-
-

Pygments Lexer for OpenBugs and WinBugs models.

-
-

New in version 1.6.

-
-
- -
-
-class pygments.lexers.modeling.JagsLexer¶
-
-
Short names
-

jags

-
-
Filenames
-

*.jag, *.bug

-
-
MIME types
-

None

-
-
-

Pygments Lexer for JAGS.

-
-

New in version 1.6.

-
-
- -
-
-class pygments.lexers.modeling.ModelicaLexer¶
-
-
Short names
-

modelica

-
-
Filenames
-

*.mo

-
-
MIME types
-

text/x-modelica

-
-
-

For Modelica source code.

-
-

New in version 1.1.

-
-
- -
-
-class pygments.lexers.modeling.StanLexer¶
-
-
Short names
-

stan

-
-
Filenames
-

*.stan

-
-
MIME types
-

None

-
-
-

Pygments Lexer for Stan models.

-

The Stan modeling language is specified in the Stan Modeling Language User’s Guide and Reference Manual, v2.17.0 (PDF).

-
-

New in version 1.6.

-
-
- -
-
-

Multi-Dialect Lexer for Modula-2¶

-
-
-class pygments.lexers.modula2.Modula2Lexer¶
-
-
Short names
-

modula2, m2

-
-
Filenames
-

*.def, *.mod

-
-
MIME types
-

text/x-modula2

-
-
-

For Modula-2 source code.

-

The Modula-2 lexer supports several dialects. By default, it operates in fallback mode, recognising the combined literals, punctuation symbols and operators of all supported dialects, and the combined reserved words and builtins of PIM Modula-2, ISO Modula-2 and Modula-2 R10, while not differentiating between library defined identifiers.

-

To select a specific dialect, a dialect option may be passed or a dialect tag may be embedded into a source file.

-

Dialect Options:

-
-
m2pim

Select PIM Modula-2 dialect.

-
-
m2iso

Select ISO Modula-2 dialect.

-
-
m2r10

Select Modula-2 R10 dialect.

-
-
objm2

Select Objective Modula-2 dialect.

-
-
-

The PIM and ISO dialect options may be qualified with a language extension.

-

Language Extensions:

-
-
+aglet

Select Aglet Modula-2 extensions, available with m2iso.

-
-
+gm2

Select GNU Modula-2 extensions, available with m2pim.

-
-
+p1

Select p1 Modula-2 extensions, available with m2iso.

-
-
+xds

Select XDS Modula-2 extensions, available with m2iso.

-
-
-

Passing a Dialect Option via Unix Commandline Interface

-

Dialect options may be passed to the lexer using the dialect key. Only one such option should be passed; if multiple dialect options are passed, the first valid option is used and any subsequent options are ignored.

-

Examples:

-
-
$ pygmentize -O full,dialect=m2iso -f html -o /path/to/output /path/to/input

Use ISO dialect to render input to HTML output

-
-
$ pygmentize -O full,dialect=m2iso+p1 -f rtf -o /path/to/output /path/to/input

Use ISO dialect with p1 extensions to render input to RTF output

-
-
-

Embedding a Dialect Option within a source file

-

A dialect option may be embedded in a source file in the form of a dialect tag, a specially formatted comment that specifies a dialect option.

-

Dialect Tag EBNF:

-
dialectTag :
    OpeningCommentDelim Prefix dialectOption ClosingCommentDelim ;

dialectOption :
    'm2pim' | 'm2iso' | 'm2r10' | 'objm2' |
    'm2iso+aglet' | 'm2pim+gm2' | 'm2iso+p1' | 'm2iso+xds' ;

Prefix : '!' ;

OpeningCommentDelim : '(*' ;

ClosingCommentDelim : '*)' ;
-
-
-

No whitespace is permitted between the tokens of a dialect tag.

-

In the event that a source file contains multiple dialect tags, the first tag that contains a valid dialect option will be used and any subsequent dialect tags will be ignored. Ideally, a dialect tag should be placed at the beginning of a source file.

-

An embedded dialect tag overrides a dialect option set via the command line.

-

Examples:

-
-
(*!m2r10*) DEFINITION MODULE Foobar; ...

Use Modula2 R10 dialect to render this source file.

-
-
(*!m2pim+gm2*) DEFINITION MODULE Bazbam; ...

Use PIM dialect with GNU extensions to render this source file.

-
-
-

Algol Publication Mode:

-

In Algol publication mode, source text is rendered for publication of algorithms in scientific papers and academic texts, following the format of the Revised Algol-60 Language Report. It is activated by passing one of two corresponding styles as an option:

-
-
algol

render reserved words lowercase underline boldface and builtins lowercase boldface italic

-
-
algol_nu

render reserved words lowercase boldface (no underlining) and builtins lowercase boldface italic

-
-
-

The lexer automatically performs the required lowercase conversion when this mode is activated.

-

Example:

-
-
$ pygmentize -O full,style=algol -f latex -o /path/to/output /path/to/input

Render input file in Algol publication mode to LaTeX output.

-
-
-

Rendering Mode of First Class ADT Identifiers:

-

The rendering of standard library first class ADT identifiers is controlled by the option flag “treat_stdlib_adts_as_builtins”.

-

When this option is turned on, standard library ADT identifiers are rendered as builtins. When it is turned off, they are rendered as ordinary library identifiers.

-

treat_stdlib_adts_as_builtins (default: On)

-

The option is useful for dialects that support ADTs as first class objects and provide ADTs in the standard library that would otherwise be built-in.

-

At present, only Modula-2 R10 supports library ADTs as first class objects, and therefore no ADT identifiers are defined for any other dialects.

-

Example:

-
-
$ pygmentize -O full,dialect=m2r10,treat_stdlib_adts_as_builtins=Off ...

Render standard library ADTs as ordinary library types.

-
-
-
-

New in version 1.3.

-
-
-

Changed in version 2.1: Added multi-dialect support.

-
-
- -
-
-

Lexer for the Monte programming language¶

-
-
-class pygments.lexers.monte.MonteLexer¶
-
-
Short names
-

monte

-
-
Filenames
-

*.mt

-
-
MIME types
-

None

-
-
-

Lexer for the Monte programming language.

-
-

New in version 2.2.

-
-
- -
-
-

Lexers for NCAR Command Language¶

-
-
-class pygments.lexers.ncl.NCLLexer¶
-
-
Short names
-

ncl

-
-
Filenames
-

*.ncl

-
-
MIME types
-

text/ncl

-
-
-

Lexer for NCL code.

-
-

New in version 2.2.

-
-
- -
-
-

Lexer for the Nim language (formerly known as Nimrod)¶

-
-
-class pygments.lexers.nimrod.NimrodLexer¶
-
-
Short names
-

nim, nimrod

-
-
Filenames
-

*.nim, *.nimrod

-
-
MIME types
-

text/x-nim

-
-
-

For Nim source code.

-
-

New in version 1.5.

-
-
- -
-
-

Lexer for the Nit language¶

-
-
-class pygments.lexers.nit.NitLexer¶
-
-
Short names
-

nit

-
-
Filenames
-

*.nit

-
-
MIME types
-

None

-
-
-

For Nit source.

-
-

New in version 2.0.

-
-
- -
-
-

Lexers for the NixOS Nix language¶

-
-
-class pygments.lexers.nix.NixLexer¶
-
-
Short names
-

nixos, nix

-
-
Filenames
-

*.nix

-
-
MIME types
-

text/x-nix

-
-
-

For the Nix language.

-
-

New in version 2.0.

-
-
- -
-
-

Lexers for Oberon family languages¶

-
-
-class pygments.lexers.oberon.ComponentPascalLexer¶
-
-
Short names
-

componentpascal, cp

-
-
Filenames
-

*.cp, *.cps

-
-
MIME types
-

text/x-component-pascal

-
-
-

For Component Pascal source code.

-
-

New in version 2.1.

-
-
- -
-
-

Lexers for Objective-C family languages¶

-
-
-class pygments.lexers.objective.LogosLexer¶
-
-
Short names
-

logos

-
-
Filenames
-

*.x, *.xi, *.xm, *.xmi

-
-
MIME types
-

text/x-logos

-
-
-

For Logos + Objective-C source code with preprocessor directives.

-
-

New in version 1.6.

-
-
- -
-
-class pygments.lexers.objective.ObjectiveCLexer¶
-
-
Short names
-

objective-c, objectivec, obj-c, objc

-
-
Filenames
-

*.m, *.h

-
-
MIME types
-

text/x-objective-c

-
-
-

For Objective-C source code with preprocessor directives.

-
- -
-
-class pygments.lexers.objective.ObjectiveCppLexer¶
-
-
Short names
-

objective-c++, objectivec++, obj-c++, objc++

-
-
Filenames
-

*.mm, *.hh

-
-
MIME types
-

text/x-objective-c++

-
-
-

For Objective-C++ source code with preprocessor directives.

-
- -
-
-class pygments.lexers.objective.SwiftLexer¶
-
-
Short names
-

swift

-
-
Filenames
-

*.swift

-
-
MIME types
-

text/x-swift

-
-
-

For Swift source.

-
-

New in version 2.0.

-
-
- -
-
-

Lexers for the Ooc language¶

-
-
-class pygments.lexers.ooc.OocLexer¶
-
-
Short names
-

ooc

-
-
Filenames
-

*.ooc

-
-
MIME types
-

text/x-ooc

-
-
-

For Ooc source code.

-
-

New in version 1.2.

-
-
- -
-
-

Lexer for ParaSail¶

-
-
-class pygments.lexers.parasail.ParaSailLexer¶
-
-
Short names
-

parasail

-
-
Filenames
-

*.psi, *.psl

-
-
MIME types
-

text/x-parasail

-
-
-

For ParaSail source code.

-
-

New in version 2.1.

-
-
- -
-
-

Lexers for parser generators¶

-
-
-class pygments.lexers.parsers.AntlrActionScriptLexer¶
-
-
Short names
-

antlr-as, antlr-actionscript

-
-
Filenames
-

*.G, *.g

-
-
MIME types
-

None

-
-
-

ANTLR with ActionScript Target

-
-

New in version 1.1.

-
-
- -
-
-class pygments.lexers.parsers.AntlrCSharpLexer¶
-
-
Short names
-

antlr-csharp, antlr-c#

-
-
Filenames
-

*.G, *.g

-
-
MIME types
-

None

-
-
-

ANTLR with C# Target

-
-

New in version 1.1.

-
-
- -
-
-class pygments.lexers.parsers.AntlrCppLexer¶
-
-
Short names
-

antlr-cpp

-
-
Filenames
-

*.G, *.g

-
-
MIME types
-

None

-
-
-

ANTLR with CPP Target

-
-

New in version 1.1.

-
-
- -
-
-class pygments.lexers.parsers.AntlrJavaLexer¶
-
-
Short names
-

antlr-java

-
-
Filenames
-

*.G, *.g

-
-
MIME types
-

None

-
-
-

ANTLR with Java Target

-
-

New in version 1.1.

-
-
- -
-
-class pygments.lexers.parsers.AntlrLexer¶
-
-
Short names
-

antlr

-
-
Filenames
-

None

-
-
MIME types
-

None

-
-
-

Generic ANTLR Lexer. Should not be called directly; instead, use the DelegatingLexer for your target language.

-
-

New in version 1.1.

-
-
- -
-
-class pygments.lexers.parsers.AntlrObjectiveCLexer¶
-
-
Short names
-

antlr-objc

-
-
Filenames
-

*.G, *.g

-
-
MIME types
-

None

-
-
-

ANTLR with Objective-C Target

-
-

New in version 1.1.

-
-
- -
-
-class pygments.lexers.parsers.AntlrPerlLexer¶
-
-
Short names
-

antlr-perl

-
-
Filenames
-

*.G, *.g

-
-
MIME types
-

None

-
-
-

ANTLR with Perl Target

-
-

New in version 1.1.

-
-
- -
-
-class pygments.lexers.parsers.AntlrPythonLexer¶
-
-
Short names
-

antlr-python

-
-
Filenames
-

*.G, *.g

-
-
MIME types
-

None

-
-
-

ANTLR with Python Target

-
-

New in version 1.1.

-
-
- -
-
-class pygments.lexers.parsers.AntlrRubyLexer¶
-
-
Short names
-

antlr-ruby, antlr-rb

-
-
Filenames
-

*.G, *.g

-
-
MIME types
-

None

-
-
-

ANTLR with Ruby Target

-
-

New in version 1.1.

-
-
- -
-
-class pygments.lexers.parsers.EbnfLexer¶
-
-
Short names
-

ebnf

-
-
Filenames
-

*.ebnf

-
-
MIME types
-

text/x-ebnf

-
-
-

Lexer for ISO/IEC 14977 EBNF grammars.

-
-

New in version 2.0.

-
-
- -
-
-class pygments.lexers.parsers.RagelCLexer¶
-
-
Short names
-

ragel-c

-
-
Filenames
-

*.rl

-
-
MIME types
-

None

-
-
-

A lexer for Ragel in a C host file.

-
-

New in version 1.1.

-
-
- -
-
-class pygments.lexers.parsers.RagelCppLexer¶
-
-
Short names
-

ragel-cpp

-
-
Filenames
-

*.rl

-
-
MIME types
-

None

-
-
-

A lexer for Ragel in a CPP host file.

-
-

New in version 1.1.

-
-
- -
-
-class pygments.lexers.parsers.RagelDLexer¶
-
-
Short names
-

ragel-d

-
-
Filenames
-

*.rl

-
-
MIME types
-

None

-
-
-

A lexer for Ragel in a D host file.

-
-

New in version 1.1.

-
-
- -
-
-class pygments.lexers.parsers.RagelEmbeddedLexer¶
-
-
Short names
-

ragel-em

-
-
Filenames
-

*.rl

-
-
MIME types
-

None

-
-
-

A lexer for Ragel embedded in a host language file.

-

This will only highlight Ragel statements. If you want host-language highlighting, use the language-specific Ragel lexer.

-
-

New in version 1.1.

-
-
- -
-
-class pygments.lexers.parsers.RagelJavaLexer¶
-
-
Short names
-

ragel-java

-
-
Filenames
-

*.rl

-
-
MIME types
-

None

-
-
-

A lexer for Ragel in a Java host file.

-
-

New in version 1.1.

-
-
- -
-
-class pygments.lexers.parsers.RagelLexer¶
-
-
Short names
-

ragel

-
-
Filenames
-

None

-
-
MIME types
-

None

-
-
-

A pure Ragel lexer. Use this for fragments of Ragel. For .rl files, use RagelEmbeddedLexer instead (or one of the language-specific subclasses).

-
-

New in version 1.1.

-
-
- -
-
-class pygments.lexers.parsers.RagelObjectiveCLexer¶
-
-
Short names
-

ragel-objc

-
-
Filenames
-

*.rl

-
-
MIME types
-

None

-
-
-

A lexer for Ragel in an Objective C host file.

-
-

New in version 1.1.

-
-
- -
-
-class pygments.lexers.parsers.RagelRubyLexer¶
-
-
Short names
-

ragel-ruby, ragel-rb

-
-
Filenames
-

*.rl

-
-
MIME types
-

None

-
-
-

A lexer for Ragel in a Ruby host file.

-
-

New in version 1.1.

-
-
- -
-
-class pygments.lexers.parsers.TreetopLexer¶
-
-
Short names
-

treetop

-
-
Filenames
-

*.treetop, *.tt

-
-
MIME types
-

None

-
-
-

A lexer for Treetop grammars.

-
-

New in version 1.6.

-
-
- -
-
-

Lexers for Pascal family languages¶

-
-
-class pygments.lexers.pascal.AdaLexer¶
-
-
Short names
-

ada, ada95, ada2005

-
-
Filenames
-

*.adb, *.ads, *.ada

-
-
MIME types
-

text/x-ada

-
-
-

For Ada source code.

-
-

New in version 1.3.

-
-
- -
-
-class pygments.lexers.pascal.DelphiLexer¶
-
-
Short names
-

delphi, pas, pascal, objectpascal

-
-
Filenames
-

*.pas, *.dpr

-
-
MIME types
-

text/x-pascal

-
-
-

For Delphi (Borland Object Pascal), Turbo Pascal and Free Pascal source code.

-

Additional options accepted:

-
-
turbopascal

Highlight Turbo Pascal specific keywords (default: True).

-
-
delphi

Highlight Borland Delphi specific keywords (default: True).

-
-
freepascal

Highlight Free Pascal specific keywords (default: True).

-
-
units

A list of units that should be considered builtin; supported are System, SysUtils, Classes and Math. The default is to consider all of them builtin.
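A minimal sketch of combining these options (the Pascal one-liner is made up):

# Minimal sketch: restrict the builtin units, disable Free Pascal keywords.
from pygments import highlight
from pygments.lexers.pascal import DelphiLexer
from pygments.formatters import TerminalFormatter

lexer = DelphiLexer(units=['System', 'SysUtils'], freepascal=False)
print(highlight("begin WriteLn('hi') end.", lexer, TerminalFormatter()))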

-
-
-
- -
-
-

Lexers for the Pawn languages¶

-
-
-class pygments.lexers.pawn.PawnLexer¶
-
-
Short names
-

pawn

-
-
Filenames
-

*.p, *.pwn, *.inc

-
-
MIME types
-

text/x-pawn

-
-
-

For Pawn source code.

-
-

New in version 2.0.

-
-
- -
-
-class pygments.lexers.pawn.SourcePawnLexer¶
-
-
Short names
-

sp

-
-
Filenames
-

*.sp

-
-
MIME types
-

text/x-sourcepawn

-
-
-

For SourcePawn source code with preprocessor directives.

-
-

New in version 1.6.

-
-
- -
- - - -
-

Lexer for Praat¶

-
-
-class pygments.lexers.praat.PraatLexer¶
-
-
Short names
-

praat

-
-
Filenames
-

*.praat, *.proc, *.psc

-
-
MIME types
-

None

-
-
-

For Praat scripts.

-
-

New in version 2.1.

-
-
- -
-
-

Lexers for Prolog and Prolog-like languages¶

-
-
-class pygments.lexers.prolog.LogtalkLexer¶
-
-
Short names
-

logtalk

-
-
Filenames
-

*.lgt, *.logtalk

-
-
MIME types
-

text/x-logtalk

-
-
-

For Logtalk source code.

-
-

New in version 0.10.

-
-
- -
-
-class pygments.lexers.prolog.PrologLexer¶
-
-
Short names
-

prolog

-
-
Filenames
-

*.ecl, *.prolog, *.pro, *.pl

-
-
MIME types
-

text/x-prolog

-
-
-

Lexer for Prolog files.

-
- -
- -
-

Lexer for QVT Operational language¶

-
-
-class pygments.lexers.qvt.QVToLexer¶
-
-
Short names
-

qvto, qvt

-
-
Filenames
-

*.qvto

-
-
MIME types
-

None

-
-
-

For the QVT Operational Mapping language.

-

Reference for implementing this: «Meta Object Facility (MOF) 2.0 Query/View/Transformation Specification», Version 1.1 - January 2011 (http://www.omg.org/spec/QVT/1.1/), see §8.4, «Concrete Syntax» in particular.

-

Notable tokens assignments:

-
    -
  • Name.Class is assigned to the identifier following any of the following keywords: metamodel, class, exception, primitive, enum, transformation or library

  • -
  • Name.Function is assigned to the names of mappings and queries

  • -
  • Name.Builtin.Pseudo is assigned to the pre-defined variables ‘this’, ‘self’ and ‘result’.

  • -
-
- -
-
-

Lexers for the R/S languages¶

-
-
-class pygments.lexers.r.RConsoleLexer¶
-
-
Short names
-

rconsole, rout

-
-
Filenames
-

*.Rout

-
-
MIME types
-

None

-
-
-

For R console transcripts or R CMD BATCH output files.

-
- -
-
-class pygments.lexers.r.RdLexer¶
-
-
Short names
-

rd

-
-
Filenames
-

*.Rd

-
-
MIME types
-

text/x-r-doc

-
-
-

Pygments Lexer for R documentation (Rd) files

-

This is a very minimal implementation, highlighting little more than the macros. A description of Rd syntax is found in Writing R Extensions and Parsing Rd files.

-
-

New in version 1.6.

-
-
- -
-
-class pygments.lexers.r.SLexer¶
-
-
Short names
-

splus, s, r

-
-
Filenames
-

*.S, *.R, .Rhistory, .Rprofile, .Renviron

-
-
MIME types
-

text/S-plus, text/S, text/x-r-source, text/x-r, text/x-R, text/x-r-history, text/x-r-profile

-
-
-

For S, S-plus, and R source code.

-
-

New in version 0.10.

-
-
- -
-
-

Lexers for semantic web and RDF query languages and markup¶

-
-
-class pygments.lexers.rdf.ShExCLexer¶
-
-
Short names
-

shexc, shex

-
-
Filenames
-

*.shex

-
-
MIME types
-

text/shex

-
-
-

Lexer for ShExC shape expressions language syntax.

-
- -
-
-class pygments.lexers.rdf.SparqlLexer¶
-
-
Short names
-

sparql

-
-
Filenames
-

*.rq, *.sparql

-
-
MIME types
-

application/sparql-query

-
-
-

Lexer for SPARQL query language.

-
-

New in version 2.0.

-
-
- -
-
-class pygments.lexers.rdf.TurtleLexer¶
-
-
Short names
-

turtle

-
-
Filenames
-

*.ttl

-
-
MIME types
-

text/turtle, application/x-turtle

-
-
-

Lexer for Turtle data language.

-
-

New in version 2.1.

-
-
- -
- -
-

Lexer for resource definition files¶

-
-
-class pygments.lexers.resource.ResourceLexer¶
-
-
Short names
-

resource, resourcebundle

-
-
Filenames
-

None

-
-
MIME types
-

None

-
-
-

Lexer for ICU Resource bundles.

-
-

New in version 2.0.

-
-
- -
-
-

Lexer for Relax-NG Compact syntax¶

-
-
-class pygments.lexers.rnc.RNCCompactLexer¶
-
-
Short names
-

rnc, rng-compact

-
-
Filenames
-

*.rnc

-
-
MIME types
-

None

-
-
-

For RelaxNG-compact syntax.

-
-

New in version 2.2.

-
-
- -
-
-

Lexers for Roboconf DSL¶

-
-
-class pygments.lexers.roboconf.RoboconfGraphLexer¶
-
-
Short names
-

roboconf-graph

-
-
Filenames
-

*.graph

-
-
MIME types
-

None

-
-
-

Lexer for Roboconf graph files.

-
-

New in version 2.1.

-
-
- -
-
-class pygments.lexers.roboconf.RoboconfInstancesLexer¶
-
-
Short names
-

roboconf-instances

-
-
Filenames
-

*.instances

-
-
MIME types
-

None

-
-
-

Lexer for Roboconf instances files.

-
-

New in version 2.1.

-
-
- -
-
-

Lexer for Robot Framework¶

-
-
-class pygments.lexers.robotframework.RobotFrameworkLexer¶
-
-
Short names
-

robotframework

-
-
Filenames
-

*.robot

-
-
MIME types
-

text/x-robotframework

-
-
-

For Robot Framework test data.

-

Supports both space and pipe separated plain text formats.

-
-

New in version 1.6.

-
-
- -
- -
-

Lexers for the Rust language¶

-
-
-class pygments.lexers.rust.RustLexer¶
-
-
Short names
-

rust, rs

-
-
Filenames
-

*.rs, *.rs.in

-
-
MIME types
-

text/rust

-
-
-

Lexer for the Rust programming language (version 1.10).

-
-

New in version 1.6.

-
-
- -
-
-

Lexer for SAS¶

-
-
-class pygments.lexers.sas.SASLexer¶
-
-
Short names
-

sas

-
-
Filenames
-

*.SAS, *.sas

-
-
MIME types
-

text/x-sas, text/sas, application/x-sas

-
-
-

For SAS files.

-
-

New in version 2.2.

-
-
- -
-
-

Lexer for scdoc, a simple man page generator¶

-
-
-class pygments.lexers.scdoc.ScdocLexer¶
-
-
Short names
-

scdoc, scd

-
-
Filenames
-

*.scd, *.scdoc

-
-
MIME types
-

None

-
-
-

scdoc is a simple man page generator for POSIX systems written in C99. https://git.sr.ht/~sircmpwn/scdoc

-
-

New in version 2.5.

-
-
- -
-
-

Lexer for scripting and embedded languages¶

-
-
-class pygments.lexers.scripting.AppleScriptLexer¶
-
-
Short names
-

applescript

-
-
Filenames
-

*.applescript

-
-
MIME types
-

None

-
-
-

For AppleScript source code, including AppleScript Studio. Contributed by Andreas Amann <aamann@mac.com>.

-
-

New in version 1.0.

-
-
- -
-
-class pygments.lexers.scripting.ChaiscriptLexer¶
-
-
Short names
-

chai, chaiscript

-
-
Filenames
-

*.chai

-
-
MIME types
-

text/x-chaiscript, application/x-chaiscript

-
-
-

For ChaiScript source code.

-
-

New in version 2.0.

-
-
- -
-
-class pygments.lexers.scripting.EasytrieveLexer¶
-
-
Short names
-

easytrieve

-
-
Filenames
-

*.ezt, *.mac

-
-
MIME types
-

text/x-easytrieve

-
-
-

Easytrieve Plus is a programming language for extracting, filtering and converting sequential data. Furthermore, it can lay out data for reports. It is mainly used on mainframe platforms and can access several of the mainframe’s native file formats. It is somewhat comparable to awk.

-
-

New in version 2.1.

-
-
- -
-
-class pygments.lexers.scripting.HybrisLexer¶
-
-
Short names
-

hybris, hy

-
-
Filenames
-

*.hy, *.hyb

-
-
MIME types
-

text/x-hybris, application/x-hybris

-
-
-

For Hybris source code.

-
-

New in version 1.4.

-
-
- -
-
-class pygments.lexers.scripting.JclLexer¶
-
-
Short names
-

jcl

-
-
Filenames
-

*.jcl

-
-
MIME types
-

text/x-jcl

-
-
-

Job Control Language (JCL) is a scripting language used on mainframe platforms to instruct the system on how to run a batch job or start a subsystem. It is somewhat comparable to MS DOS batch and Unix shell scripts.

-
-

New in version 2.1.

-
-
- -
-
-class pygments.lexers.scripting.LSLLexer¶
-
-
Short names
-

lsl

-
-
Filenames
-

*.lsl

-
-
MIME types
-

text/x-lsl

-
-
-

For Second Life’s Linden Scripting Language source code.

-
-

New in version 2.0.

-
-
- -
-
-class pygments.lexers.scripting.LuaLexer¶
-
-
Short names
-

lua

-
-
Filenames
-

*.lua, *.wlua

-
-
MIME types
-

text/x-lua, application/x-lua

-
-
-

For Lua source code.

-

Additional options accepted:

-
-
func_name_highlighting

If given and True, highlight builtin function names (default: True).

-
-
disabled_modules

If given, must be a list of module names whose function names should not be highlighted. By default all modules are highlighted.

-

To get a list of allowed modules, have a look into the _lua_builtins module:

-
>>> from pygments.lexers._lua_builtins import MODULES
>>> MODULES.keys()
['string', 'coroutine', 'modules', 'io', 'basic', ...]
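A minimal sketch of passing the option to the lexer itself (the module names are just examples):

# Minimal sketch: suppress builtin-name highlighting for two modules.
from pygments import highlight
from pygments.lexers.scripting import LuaLexer
from pygments.formatters import TerminalFormatter

lexer = LuaLexer(disabled_modules=['io', 'os'])
print(highlight('io.write("hi")\n', lexer, TerminalFormatter()))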
-
-
-
-
- -
-
-class pygments.lexers.scripting.MOOCodeLexer¶
-
-
Short names
-

moocode, moo

-
-
Filenames
-

*.moo

-
-
MIME types
-

text/x-moocode

-
-
-

For MOOCode (the MOO scripting language).

-
-

New in version 0.9.

-
-
- -
-
-class pygments.lexers.scripting.MoonScriptLexer¶
-
-
Short names
-

moon, moonscript

-
-
Filenames
-

*.moon

-
-
MIME types
-

text/x-moonscript, application/x-moonscript

-
-
-

For MoonScript source code.

-
-

New in version 1.5.

-
-
- -
-
-class pygments.lexers.scripting.RexxLexer¶
-
-
Short names
-

rexx, arexx

-
-
Filenames
-

*.rexx, *.rex, *.rx, *.arexx

-
-
MIME types
-

text/x-rexx

-
-
-

Rexx is a scripting language available for a wide range of different platforms, with its roots on mainframe systems. It is popular for I/O- and data-based tasks and can act as a glue language to bind different applications together.

-
-

New in version 2.0.

-
-
- -
-
-

Lexer for Smart Game Format (sgf) file format¶

-
-
-class pygments.lexers.sgf.SmartGameFormatLexer¶
-
-
Short names
-

sgf

-
-
Filenames
-

*.sgf

-
-
MIME types
-

None

-
-
-

Lexer for Smart Game Format (sgf) file format.

-

The format is used to store game records of board games for two players (mainly the game of Go). For more information about the definition of the format, see: https://www.red-bean.com/sgf/

-
-

New in version 2.4.

-
-
- -
-
-

Lexers for various shells¶

-
-
-class pygments.lexers.shell.BashLexer¶
-
-
Short names
-

bash, sh, ksh, zsh, shell

-
-
Filenames
-

*.sh, *.ksh, *.bash, *.ebuild, *.eclass, *.exheres-0, *.exlib, *.zsh, .bashrc, bashrc, .bash\*, bash\*, zshrc, .zshrc, PKGBUILD

-
-
MIME types
-

application/x-sh, application/x-shellscript, text/x-shellscript

-
-
-

Lexer for (ba|k|z|)sh shell scripts.

-
-

New in version 0.6.

-
-
- -
-
-class pygments.lexers.shell.BashSessionLexer¶
-
-
Short names
-

console, shell-session

-
-
Filenames
-

*.sh-session, *.shell-session

-
-
MIME types
-

application/x-shell-session, application/x-sh-session

-
-
-

Lexer for simplistic shell sessions.

-
-

New in version 1.1.

-
-
- -
-
-class pygments.lexers.shell.BatchLexer¶
-
-
Short names
-

bat, batch, dosbatch, winbatch

-
-
Filenames
-

*.bat, *.cmd

-
-
MIME types
-

application/x-dos-batch

-
-
-

Lexer for the DOS/Windows Batch file format.

-
-

New in version 0.7.

-
-
- -
-
-class pygments.lexers.shell.FishShellLexer¶
-
-
Short names
-

fish, fishshell

-
-
Filenames
-

*.fish, *.load

-
-
MIME types
-

application/x-fish

-
-
-

Lexer for Fish shell scripts.

-
-

New in version 2.1.

-
-
- -
-
-class pygments.lexers.shell.MSDOSSessionLexer¶
-
-
Short names
-

doscon

-
-
Filenames
-

None

-
-
MIME types
-

None

-
-
-

Lexer for simplistic MSDOS sessions.

-
-

New in version 2.1.

-
-
- -
-
-class pygments.lexers.shell.PowerShellLexer¶
-
-
Short names
-

powershell, posh, ps1, psm1

-
-
Filenames
-

*.ps1, *.psm1

-
-
MIME types
-

text/x-powershell

-
-
-

For Windows PowerShell code.

-
-

New in version 1.5.

-
-
- -
-
-class pygments.lexers.shell.PowerShellSessionLexer¶
-
-
Short names
-

ps1con

-
-
Filenames
-

None

-
-
MIME types
-

None

-
-
-

Lexer for simplistic Windows PowerShell sessions.

-
-

New in version 2.1.

-
-
- -
-
-class pygments.lexers.shell.SlurmBashLexer¶
-
-
Short names
-

slurm, sbatch

-
-
Filenames
-

*.sl

-
-
MIME types
-

None

-
-
-

Lexer for (ba|k|z|)sh Slurm scripts.

-
-

New in version 2.4.

-
-
- -
-
-class pygments.lexers.shell.TcshLexer¶
-
-
Short names
-

tcsh, csh

-
-
Filenames
-

*.tcsh, *.csh

-
-
MIME types
-

application/x-csh

-
-
-

Lexer for tcsh scripts.

-
-

New in version 0.10.

-
-
- -
-
-class pygments.lexers.shell.TcshSessionLexer¶
-
-
Short names
-

tcshcon

-
-
Filenames
-

None

-
-
MIME types
-

None

-
-
-

Lexer for Tcsh sessions.

-
-

New in version 2.1.

-
-
- -
-
-

Lexer for the Slash programming language¶

-
-
-class pygments.lexers.slash.SlashLexer¶
-
-
Short names
-

slash

-
-
Filenames
-

*.sl

-
-
MIME types
-

None

-
-
-

Lexer for the Slash programming language.

-
-

New in version 2.4.

-
-
- -
- -
-

Lexers for the SMV languages¶

-
-
-class pygments.lexers.smv.NuSMVLexer¶
-
-
Short names
-

nusmv

-
-
Filenames
-

*.smv

-
-
MIME types
-

None

-
-
-

Lexer for the NuSMV language.

-
-

New in version 2.2.

-
-
- -
-
-

Lexers for the SNOBOL language¶

-
-
-class pygments.lexers.snobol.SnobolLexer¶
-
-
Short names
-

snobol

-
-
Filenames
-

*.snobol

-
-
MIME types
-

text/x-snobol

-
-
-

Lexer for the SNOBOL4 programming language.

-

Recognizes the common ASCII equivalents of the original SNOBOL4 operators. Does not require spaces around binary operators.

-
-

New in version 1.5.

-
-
- -
-
-

Lexers for Solidity

class pygments.lexers.solidity.SolidityLexer
    Short names: solidity | Filenames: *.sol | MIME types: None
    For Solidity source code.
    New in version 2.5.

Special lexers

class pygments.lexers.special.RawTokenLexer
    Short names: raw | Filenames: None | MIME types: application/x-pygments-tokens
    Recreate a token stream formatted with the RawTokenFormatter. This lexer
    raises exceptions during parsing if the token stream in the file is
    malformed.
    Additional options accepted:
        compress: If set to "gz" or "bz2", decompress the token stream with
        the given compression algorithm before lexing (default: "").

class pygments.lexers.special.TextLexer
    Short names: text | Filenames: *.txt | MIME types: text/plain
    "Null" lexer, doesn't highlight anything.
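A minimal round trip with the RawTokenLexer above, assuming the matching
RawTokenFormatter from pygments.formatters (which emits the raw format as
bytes); this is a sketch, not the only way to persist token streams:

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.lexers.special import RawTokenLexer
from pygments.formatters import RawTokenFormatter, HtmlFormatter

# Serialize a token stream in the raw format (bytes)...
raw = highlight('print("hi")', PythonLexer(), RawTokenFormatter())

# ...then recreate it later and render it with any other formatter.
html = highlight(raw.decode('utf-8'), RawTokenLexer(), HtmlFormatter())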

Lexer for Stata

class pygments.lexers.stata.StataLexer
    Short names: stata, do | Filenames: *.do, *.ado | MIME types: text/x-stata, text/stata, application/x-stata
    For Stata do files.
    New in version 2.2.

Lexer for SuperCollider

class pygments.lexers.supercollider.SuperColliderLexer
    Short names: sc, supercollider | Filenames: *.sc, *.scd | MIME types: application/supercollider, text/supercollider
    For SuperCollider source code.
    New in version 2.1.

Lexers for various template engines' markup

class pygments.lexers.templates.Angular2HtmlLexer
    Short names: html+ng2 | Filenames: *.ng2 | MIME types: None
    Subclass of the Angular2Lexer that highlights unlexed data with the HtmlLexer.
    New in version 2.0.

class pygments.lexers.templates.Angular2Lexer
    Short names: ng2 | Filenames: None | MIME types: None
    Generic angular2 template lexer. Highlights only the Angular template tags
    (stuff between {{ and }} and the special attributes '(event)=',
    '[property]=', '[(twoWayBinding)]='). Everything else is left for a
    delegating lexer.
    New in version 2.1.

class pygments.lexers.templates.CheetahHtmlLexer
    Short names: html+cheetah, html+spitfire, htmlcheetah | Filenames: None | MIME types: text/html+cheetah, text/html+spitfire
    Subclass of the CheetahLexer that highlights unlexed data with the HtmlLexer.

class pygments.lexers.templates.CheetahJavascriptLexer
    Short names: js+cheetah, javascript+cheetah, js+spitfire, javascript+spitfire | Filenames: None | MIME types: application/x-javascript+cheetah, text/x-javascript+cheetah, text/javascript+cheetah, application/x-javascript+spitfire, text/x-javascript+spitfire, text/javascript+spitfire
    Subclass of the CheetahLexer that highlights unlexed data with the JavascriptLexer.

class pygments.lexers.templates.CheetahLexer
    Short names: cheetah, spitfire | Filenames: *.tmpl, *.spt | MIME types: application/x-cheetah, application/x-spitfire
    Generic cheetah templates lexer. Code that isn't Cheetah markup is yielded
    as Token.Other. This also works for spitfire templates which use the same
    syntax.

class pygments.lexers.templates.CheetahXmlLexer
    Short names: xml+cheetah, xml+spitfire | Filenames: None | MIME types: application/xml+cheetah, application/xml+spitfire
    Subclass of the CheetahLexer that highlights unlexed data with the XmlLexer.

class pygments.lexers.templates.ColdfusionCFCLexer
    Short names: cfc | Filenames: *.cfc | MIME types: None
    Coldfusion markup/script components.
    New in version 2.0.

class pygments.lexers.templates.ColdfusionHtmlLexer
    Short names: cfm | Filenames: *.cfm, *.cfml | MIME types: application/x-coldfusion
    Coldfusion markup in HTML.

class pygments.lexers.templates.ColdfusionLexer
    Short names: cfs | Filenames: None | MIME types: None
    Coldfusion statements.

class pygments.lexers.templates.CssDjangoLexer
    Short names: css+django, css+jinja | Filenames: None | MIME types: text/css+django, text/css+jinja
    Subclass of the DjangoLexer that highlights unlexed data with the CssLexer.

class pygments.lexers.templates.CssErbLexer
    Short names: css+erb, css+ruby | Filenames: None | MIME types: text/css+ruby
    Subclass of ErbLexer which highlights unlexed data with the CssLexer.

class pygments.lexers.templates.CssGenshiLexer
    Short names: css+genshitext, css+genshi | Filenames: None | MIME types: text/css+genshi
    A lexer that highlights CSS definitions in genshi text templates.

class pygments.lexers.templates.CssPhpLexer
    Short names: css+php | Filenames: None | MIME types: text/css+php
    Subclass of PhpLexer which highlights unmatched data with the CssLexer.

class pygments.lexers.templates.CssSmartyLexer
    Short names: css+smarty | Filenames: None | MIME types: text/css+smarty
    Subclass of the SmartyLexer that highlights unlexed data with the CssLexer.

class pygments.lexers.templates.DjangoLexer
    Short names: django, jinja | Filenames: None | MIME types: application/x-django-templating, application/x-jinja
    Generic django and jinja template lexer. It just highlights django/jinja
    code between the preprocessor directives; other data is left untouched by
    the lexer.

class pygments.lexers.templates.ErbLexer
    Short names: erb | Filenames: None | MIME types: application/x-ruby-templating
    Generic ERB (Ruby Templating) lexer. Just highlights ruby code between the
    preprocessor directives; other data is left untouched by the lexer.
    All options are also forwarded to the RubyLexer.

class pygments.lexers.templates.EvoqueHtmlLexer
    Short names: html+evoque | Filenames: *.html | MIME types: text/html+evoque
    Subclass of the EvoqueLexer that highlights unlexed data with the HtmlLexer.
    New in version 1.1.

class pygments.lexers.templates.EvoqueLexer
    Short names: evoque | Filenames: *.evoque | MIME types: application/x-evoque
    For files using the Evoque templating system.
    New in version 1.1.

class pygments.lexers.templates.EvoqueXmlLexer
    Short names: xml+evoque | Filenames: *.xml | MIME types: application/xml+evoque
    Subclass of the EvoqueLexer that highlights unlexed data with the XmlLexer.
    New in version 1.1.

class pygments.lexers.templates.GenshiLexer
    Short names: genshi, kid, xml+genshi, xml+kid | Filenames: *.kid | MIME types: application/x-genshi, application/x-kid
    A lexer that highlights genshi and kid XML templates.

class pygments.lexers.templates.GenshiTextLexer
    Short names: genshitext | Filenames: None | MIME types: application/x-genshi-text, text/x-genshi
    A lexer that highlights genshi text templates.

class pygments.lexers.templates.HandlebarsHtmlLexer
    Short names: html+handlebars | Filenames: *.handlebars, *.hbs | MIME types: text/html+handlebars, text/x-handlebars-template
    Subclass of the HandlebarsLexer that highlights unlexed data with the HtmlLexer.
    New in version 2.0.

class pygments.lexers.templates.HandlebarsLexer
    Short names: handlebars | Filenames: None | MIME types: None
    Generic handlebars <http://handlebarsjs.com/> template lexer. Highlights
    only the Handlebars template tags (stuff between {{ and }}). Everything
    else is left for a delegating lexer.
    New in version 2.0.

class pygments.lexers.templates.HtmlDjangoLexer
    Short names: html+django, html+jinja, htmldjango | Filenames: None | MIME types: text/html+django, text/html+jinja
    Subclass of the DjangoLexer that highlights unlexed data with the HtmlLexer.
    Nested Javascript and CSS is highlighted too.

class pygments.lexers.templates.HtmlGenshiLexer
    Short names: html+genshi, html+kid | Filenames: None | MIME types: text/html+genshi
    A lexer that highlights genshi and kid HTML templates.

class pygments.lexers.templates.HtmlPhpLexer
    Short names: html+php | Filenames: *.phtml | MIME types: application/x-php, application/x-httpd-php, application/x-httpd-php3, application/x-httpd-php4, application/x-httpd-php5
    Subclass of PhpLexer that highlights unhandled data with the HtmlLexer.
    Nested Javascript and CSS is highlighted too.

class pygments.lexers.templates.HtmlSmartyLexer
    Short names: html+smarty | Filenames: None | MIME types: text/html+smarty
    Subclass of the SmartyLexer that highlights unlexed data with the HtmlLexer.
    Nested Javascript and CSS is highlighted too.

class pygments.lexers.templates.JavascriptDjangoLexer
    Short names: js+django, javascript+django, js+jinja, javascript+jinja | Filenames: None | MIME types: application/x-javascript+django, application/x-javascript+jinja, text/x-javascript+django, text/x-javascript+jinja, text/javascript+django, text/javascript+jinja
    Subclass of the DjangoLexer that highlights unlexed data with the JavascriptLexer.

class pygments.lexers.templates.JavascriptErbLexer
    Short names: js+erb, javascript+erb, js+ruby, javascript+ruby | Filenames: None | MIME types: application/x-javascript+ruby, text/x-javascript+ruby, text/javascript+ruby
    Subclass of ErbLexer which highlights unlexed data with the JavascriptLexer.

class pygments.lexers.templates.JavascriptGenshiLexer
    Short names: js+genshitext, js+genshi, javascript+genshitext, javascript+genshi | Filenames: None | MIME types: application/x-javascript+genshi, text/x-javascript+genshi, text/javascript+genshi
    A lexer that highlights javascript code in genshi text templates.

class pygments.lexers.templates.JavascriptPhpLexer
    Short names: js+php, javascript+php | Filenames: None | MIME types: application/x-javascript+php, text/x-javascript+php, text/javascript+php
    Subclass of PhpLexer which highlights unmatched data with the JavascriptLexer.

class pygments.lexers.templates.JavascriptSmartyLexer
    Short names: js+smarty, javascript+smarty | Filenames: None | MIME types: application/x-javascript+smarty, text/x-javascript+smarty, text/javascript+smarty
    Subclass of the SmartyLexer that highlights unlexed data with the JavascriptLexer.

class pygments.lexers.templates.JspLexer
    Short names: jsp | Filenames: *.jsp | MIME types: application/x-jsp
    Lexer for Java Server Pages.
    New in version 0.7.

class pygments.lexers.templates.LassoCssLexer
    Short names: css+lasso | Filenames: None | MIME types: text/css+lasso
    Subclass of the LassoLexer which highlights unhandled data with the CssLexer.
    New in version 1.6.

class pygments.lexers.templates.LassoHtmlLexer
    Short names: html+lasso | Filenames: None | MIME types: text/html+lasso, application/x-httpd-lasso, application/x-httpd-lasso[89]
    Subclass of the LassoLexer which highlights unhandled data with the HtmlLexer.
    Nested JavaScript and CSS is also highlighted.
    New in version 1.6.

class pygments.lexers.templates.LassoJavascriptLexer
    Short names: js+lasso, javascript+lasso | Filenames: None | MIME types: application/x-javascript+lasso, text/x-javascript+lasso, text/javascript+lasso
    Subclass of the LassoLexer which highlights unhandled data with the JavascriptLexer.
    New in version 1.6.

class pygments.lexers.templates.LassoXmlLexer
    Short names: xml+lasso | Filenames: None | MIME types: application/xml+lasso
    Subclass of the LassoLexer which highlights unhandled data with the XmlLexer.
    New in version 1.6.

class pygments.lexers.templates.LiquidLexer
    Short names: liquid | Filenames: *.liquid | MIME types: None
    Lexer for Liquid templates.
    New in version 2.0.

class pygments.lexers.templates.MakoCssLexer
    Short names: css+mako | Filenames: None | MIME types: text/css+mako
    Subclass of the MakoLexer that highlights unlexed data with the CssLexer.
    New in version 0.7.

class pygments.lexers.templates.MakoHtmlLexer
    Short names: html+mako | Filenames: None | MIME types: text/html+mako
    Subclass of the MakoLexer that highlights unlexed data with the HtmlLexer.
    New in version 0.7.

class pygments.lexers.templates.MakoJavascriptLexer
    Short names: js+mako, javascript+mako | Filenames: None | MIME types: application/x-javascript+mako, text/x-javascript+mako, text/javascript+mako
    Subclass of the MakoLexer that highlights unlexed data with the JavascriptLexer.
    New in version 0.7.

class pygments.lexers.templates.MakoLexer
    Short names: mako | Filenames: *.mao | MIME types: application/x-mako
    Generic mako templates lexer. Code that isn't Mako markup is yielded as
    Token.Other.
    New in version 0.7.

class pygments.lexers.templates.MakoXmlLexer
    Short names: xml+mako | Filenames: None | MIME types: application/xml+mako
    Subclass of the MakoLexer that highlights unlexed data with the XmlLexer.
    New in version 0.7.

class pygments.lexers.templates.MasonLexer
    Short names: mason | Filenames: *.m, *.mhtml, *.mc, *.mi, autohandler, dhandler | MIME types: application/x-mason
    Generic mason templates lexer. Stolen from Myghty lexer. Code that isn't
    Mason markup is HTML.
    New in version 1.4.

class pygments.lexers.templates.MyghtyCssLexer
    Short names: css+myghty | Filenames: None | MIME types: text/css+myghty
    Subclass of the MyghtyLexer that highlights unlexed data with the CssLexer.
    New in version 0.6.

class pygments.lexers.templates.MyghtyHtmlLexer
    Short names: html+myghty | Filenames: None | MIME types: text/html+myghty
    Subclass of the MyghtyLexer that highlights unlexed data with the HtmlLexer.
    New in version 0.6.

class pygments.lexers.templates.MyghtyJavascriptLexer
    Short names: js+myghty, javascript+myghty | Filenames: None | MIME types: application/x-javascript+myghty, text/x-javascript+myghty, text/javascript+mygthy
    Subclass of the MyghtyLexer that highlights unlexed data with the JavascriptLexer.
    New in version 0.6.

class pygments.lexers.templates.MyghtyLexer
    Short names: myghty | Filenames: *.myt, autodelegate | MIME types: application/x-myghty
    Generic myghty templates lexer. Code that isn't Myghty markup is yielded
    as Token.Other.
    New in version 0.6.

class pygments.lexers.templates.MyghtyXmlLexer
    Short names: xml+myghty | Filenames: None | MIME types: application/xml+myghty
    Subclass of the MyghtyLexer that highlights unlexed data with the XmlLexer.
    New in version 0.6.

class pygments.lexers.templates.RhtmlLexer
    Short names: rhtml, html+erb, html+ruby | Filenames: *.rhtml | MIME types: text/html+ruby
    Subclass of the ERB lexer that highlights the unlexed data with the html
    lexer. Nested Javascript and CSS is highlighted too.

class pygments.lexers.templates.SmartyLexer
    Short names: smarty | Filenames: *.tpl | MIME types: application/x-smarty
    Generic Smarty template lexer. Just highlights smarty code between the
    preprocessor directives; other data is left untouched by the lexer.

class pygments.lexers.templates.SspLexer
    Short names: ssp | Filenames: *.ssp | MIME types: application/x-ssp
    Lexer for Scalate Server Pages.
    New in version 1.4.

class pygments.lexers.templates.TeaTemplateLexer
    Short names: tea | Filenames: *.tea | MIME types: text/x-tea
    Lexer for Tea Templates.
    New in version 1.5.

class pygments.lexers.templates.TwigHtmlLexer
    Short names: html+twig | Filenames: *.twig | MIME types: text/html+twig
    Subclass of the TwigLexer that highlights unlexed data with the HtmlLexer.
    New in version 2.0.

class pygments.lexers.templates.TwigLexer
    Short names: twig | Filenames: None | MIME types: application/x-twig
    Twig template lexer. It just highlights Twig code between the preprocessor
    directives; other data is left untouched by the lexer.
    New in version 2.0.

class pygments.lexers.templates.VelocityHtmlLexer
    Short names: html+velocity | Filenames: None | MIME types: text/html+velocity
    Subclass of the VelocityLexer that highlights unlexed data with the HtmlLexer.

class pygments.lexers.templates.VelocityLexer
    Short names: velocity | Filenames: *.vm, *.fhtml | MIME types: None
    Generic Velocity template lexer. Just highlights velocity directives and
    variable references; other data is left untouched by the lexer.

class pygments.lexers.templates.VelocityXmlLexer
    Short names: xml+velocity | Filenames: None | MIME types: application/xml+velocity
    Subclass of the VelocityLexer that highlights unlexed data with the XmlLexer.

class pygments.lexers.templates.XmlDjangoLexer
    Short names: xml+django, xml+jinja | Filenames: None | MIME types: application/xml+django, application/xml+jinja
    Subclass of the DjangoLexer that highlights unlexed data with the XmlLexer.

class pygments.lexers.templates.XmlErbLexer
    Short names: xml+erb, xml+ruby | Filenames: None | MIME types: application/xml+ruby
    Subclass of ErbLexer which highlights data outside preprocessor directives
    with the XmlLexer.

class pygments.lexers.templates.XmlPhpLexer
    Short names: xml+php | Filenames: None | MIME types: application/xml+php
    Subclass of PhpLexer that highlights unhandled data with the XmlLexer.

class pygments.lexers.templates.XmlSmartyLexer
    Short names: xml+smarty | Filenames: None | MIME types: application/xml+smarty
    Subclass of the SmartyLexer that highlights unlexed data with the XmlLexer.

class pygments.lexers.templates.YamlJinjaLexer
    Short names: yaml+jinja, salt, sls | Filenames: *.sls | MIME types: text/x-yaml+jinja, text/x-sls
    Subclass of the DjangoLexer that highlights unlexed data with the YamlLexer.
    Commonly used in Saltstack salt states.
    New in version 2.0.
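Most of the classes above are thin delegating wrappers, so picking one by its
short name is all that is needed to get combined highlighting. A small sketch
using the html+django alias from this section (the template string is an
arbitrary example):

from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import HtmlFormatter

tpl = '<ul>{% for item in items %}<li>{{ item }}</li>{% endfor %}</ul>'
# Template tags are lexed as Django/Jinja; the rest is delegated to the HTML lexer.
print(highlight(tpl, get_lexer_by_name('html+django'), HtmlFormatter()))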

Lexer for Tera Term macro files

class pygments.lexers.teraterm.TeraTermLexer
    Short names: ttl, teraterm, teratermmacro | Filenames: *.ttl | MIME types: text/x-teratermmacro
    For Tera Term macro source code.
    New in version 2.4.

Lexers for testing languages

class pygments.lexers.testing.GherkinLexer
    Short names: cucumber, gherkin | Filenames: *.feature | MIME types: text/x-gherkin
    For Gherkin <http://github.com/aslakhellesoy/gherkin/> syntax.
    New in version 1.2.

class pygments.lexers.testing.TAPLexer
    Short names: tap | Filenames: *.tap | MIME types: None
    For Test Anything Protocol (TAP) output.
    New in version 2.1.

Lexers for various text formats

class pygments.lexers.textfmts.GettextLexer
    Short names: pot, po | Filenames: *.pot, *.po | MIME types: application/x-gettext, text/x-gettext, text/gettext
    Lexer for Gettext catalog files.
    New in version 0.9.

class pygments.lexers.textfmts.HttpLexer
    Short names: http | Filenames: None | MIME types: None
    Lexer for HTTP sessions.
    New in version 1.5.
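Since the HttpLexer registers no filename patterns or MIME types, it is
usually selected explicitly. A small sketch (the request text is an arbitrary
example):

from pygments import highlight
from pygments.lexers.textfmts import HttpLexer
from pygments.formatters import TerminalFormatter

session = 'GET /demo HTTP/1.1\nHost: example.org\nAccept: text/html\n\n'
print(highlight(session, HttpLexer(), TerminalFormatter()))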

class pygments.lexers.textfmts.IrcLogsLexer
    Short names: irc | Filenames: *.weechatlog | MIME types: text/x-irclog
    Lexer for IRC logs in irssi, xchat or weechat style.

class pygments.lexers.textfmts.NotmuchLexer
    Short names: notmuch | Filenames: None | MIME types: None
    For Notmuch email text format.
    New in version 2.5.
    Additional options accepted:
        body_lexer: If given, highlight the contents of the message body with
        the specified lexer, else guess it according to the body content
        (default: None).

class pygments.lexers.textfmts.TodotxtLexer
    Short names: todotxt | Filenames: todo.txt, *.todotxt | MIME types: text/x-todo
    Lexer for Todo.txt todo list format.
    New in version 2.0.
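Lexer options such as body_lexer are passed as keyword arguments, either to
the class itself or to the lookup helpers. A sketch (forcing message bodies
to be treated as Python is just an example):

from pygments.lexers import get_lexer_by_name

# Highlight message bodies as Python instead of guessing from the content.
lexer = get_lexer_by_name('notmuch', body_lexer='python')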

Lexers for theorem-proving languages

class pygments.lexers.theorem.CoqLexer
    Short names: coq | Filenames: *.v | MIME types: text/x-coq
    For the Coq theorem prover.
    New in version 1.5.

class pygments.lexers.theorem.IsabelleLexer
    Short names: isabelle | Filenames: *.thy | MIME types: text/x-isabelle
    For the Isabelle proof assistant.
    New in version 2.0.

class pygments.lexers.theorem.LeanLexer
    Short names: lean | Filenames: *.lean | MIME types: text/x-lean
    For the Lean theorem prover.
    New in version 2.0.

Lexer for RiverBed's TrafficScript (RTS) language

class pygments.lexers.trafficscript.RtsLexer
    Short names: rts, trafficscript | Filenames: *.rts | MIME types: None
    For Riverbed Stingray Traffic Manager.
    New in version 2.1.

Lexers for TypoScript

class pygments.lexers.typoscript.TypoScriptCssDataLexer
    Short names: typoscriptcssdata | Filenames: None | MIME types: None
    Lexer that highlights markers, constants and registers within css blocks.
    New in version 2.2.

class pygments.lexers.typoscript.TypoScriptHtmlDataLexer
    Short names: typoscripthtmldata | Filenames: None | MIME types: None
    Lexer that highlights markers, constants and registers within html tags.
    New in version 2.2.

class pygments.lexers.typoscript.TypoScriptLexer
    Short names: typoscript | Filenames: *.typoscript | MIME types: text/x-typoscript
    Lexer for TypoScript code.
    http://docs.typo3.org/typo3cms/TyposcriptReference/
    New in version 2.2.

Lexers for the Icon and Unicon languages, including ucode VM

class pygments.lexers.unicon.IconLexer
    Short names: icon | Filenames: *.icon, *.ICON | MIME types: None
    Lexer for Icon.
    New in version 1.6.

class pygments.lexers.unicon.UcodeLexer
    Short names: ucode | Filenames: *.u, *.u1, *.u2 | MIME types: None
    Lexer for Icon ucode files.
    New in version 2.4.

class pygments.lexers.unicon.UniconLexer
    Short names: unicon | Filenames: *.icn | MIME types: text/unicon
    For Unicon source code.
    New in version 2.4.

Lexers for the UrbiScript language

class pygments.lexers.urbi.UrbiscriptLexer
    Short names: urbiscript | Filenames: *.u | MIME types: application/x-urbiscript
    For UrbiScript source code.
    New in version 1.5.

Lexers for Varnish configuration

class pygments.lexers.varnish.VCLLexer
    Short names: vcl | Filenames: *.vcl | MIME types: text/x-vclsrc
    For Varnish Configuration Language (VCL).
    New in version 2.2.

class pygments.lexers.varnish.VCLSnippetLexer
    Short names: vclsnippets, vclsnippet | Filenames: None | MIME types: text/x-vclsnippet
    For Varnish Configuration Language snippets.
    New in version 2.2.

Lexer for Intermediate Verification Languages (IVLs)

class pygments.lexers.verification.BoogieLexer
    Short names: boogie | Filenames: *.bpl | MIME types: None
    For Boogie source code.
    New in version 2.1.

class pygments.lexers.verification.SilverLexer
    Short names: silver | Filenames: *.sil, *.vpr | MIME types: None
    For Silver source code.
    New in version 2.2.

Lexers for misc. web stuff

class pygments.lexers.webmisc.CirruLexer
    Short names: cirru | Filenames: *.cirru | MIME types: text/x-cirru
    Syntax rules of Cirru can be found at: http://cirru.org/
      • using () for expressions, but restricted to a single line
      • using "" for strings, with \ for escaping chars
      • using $ as folding operator
      • using , as unfolding operator
      • using indentations for nested blocks
    New in version 2.0.

class pygments.lexers.webmisc.DuelLexer
    Short names: duel, jbst, jsonml+bst | Filenames: *.duel, *.jbst | MIME types: text/x-duel, text/x-jbst
    Lexer for Duel Views Engine (formerly JBST) markup with JavaScript code
    blocks. See http://duelengine.org/. See http://jsonml.org/jbst/.
    New in version 1.4.

class pygments.lexers.webmisc.QmlLexer
    Short names: qml, qbs | Filenames: *.qml, *.qbs | MIME types: application/x-qml, application/x-qt.qbs+qml
    For QML files. See http://doc.qt.digia.com/4.7/qdeclarativeintroduction.html.
    New in version 1.6.

class pygments.lexers.webmisc.SlimLexer
    Short names: slim | Filenames: *.slim | MIME types: text/x-slim
    For Slim markup.
    New in version 2.0.

class pygments.lexers.webmisc.XQueryLexer
    Short names: xquery, xqy, xq, xql, xqm | Filenames: *.xqy, *.xquery, *.xq, *.xql, *.xqm | MIME types: text/xquery, application/xquery
    An XQuery lexer, parsing a stream and outputting the tokens needed to
    highlight xquery code.
    New in version 1.4.

Lexers for the Whiley language

class pygments.lexers.whiley.WhileyLexer
    Short names: whiley | Filenames: *.whiley | MIME types: text/x-whiley
    Lexer for the Whiley programming language.
    New in version 2.2.

Lexers for the X10 programming language

class pygments.lexers.x10.X10Lexer
    Short names: x10, xten | Filenames: *.x10 | MIME types: text/x-x10
    For the X10 language.
    New in version 0.1.

Lexers for Xorg configs

class pygments.lexers.xorg.XorgLexer
    Short names: xorg.conf | Filenames: xorg.conf | MIME types: None
    Lexer for xorg.conf files.

Lexers for Zig

class pygments.lexers.zig.ZigLexer
    Short names: zig | Filenames: *.zig | MIME types: text/zig
    For Zig source code.
    Grammar: https://ziglang.org/documentation/master/#Grammar

Iterating over all lexers

New in version 0.6.

To get all lexers (both the builtin and the plugin ones), you can use the
get_all_lexers() function from the pygments.lexers module:

>>> from pygments.lexers import get_all_lexers
>>> i = get_all_lexers()
>>> next(i)
('Diff', ('diff',), ('*.diff', '*.patch'), ('text/x-diff', 'text/x-patch'))
>>> next(i)
('Delphi', ('delphi', 'objectpascal', 'pas', 'pascal'), ('*.pas',), ('text/x-pascal',))
>>> next(i)
('XML+Ruby', ('xml+erb', 'xml+ruby'), (), ())

As you can see, the return value is an iterator which yields tuples in the
form (name, aliases, filetypes, mimetypes).
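Because the iterator yields plain tuples, it composes well with ordinary
Python. A small sketch that flattens the registered aliases (the result
naturally depends on which plugins are installed):

from pygments.lexers import get_all_lexers

# Collect every short name that Pygments (plus any plugins) registers.
all_aliases = sorted(alias
                     for _, aliases, _, _ in get_all_lexers()
                     for alias in aliases)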

Using Pygments with MoinMoin

From Pygments 0.7, the source distribution ships a Moin parser plugin that
can be used to get Pygments highlighting in Moin wiki pages.

To use it, copy the file external/moin-parser.py from the Pygments
distribution to the data/plugin/parser subdirectory of your Moin instance.
Edit the options at the top of the file (currently ATTACHMENTS and
INLINESTYLES) and rename the file to the name that the parser directive
should have. For example, if you name the file code.py, you can get a
highlighted Python code sample with this Wiki markup:

{{{
#!code python
[...]
}}}

where python is the Pygments name of the lexer to use.

Additionally, if you set the ATTACHMENTS option to True, Pygments will also
be called for all attachments for whose filenames there is no other parser
registered.

You are responsible for including CSS rules that will map the Pygments CSS
classes to colors. You can output a stylesheet file with pygmentize, put it
into the htdocs directory of your Moin instance and then include it in the
stylesheets configuration option in the Moin config, e.g.:

stylesheets = [('screen', '/htdocs/pygments.css')]

If you do not want to do that and are willing to accept larger HTML output,
you can set the INLINESTYLES option to True.

Register Plugins

If you want to extend Pygments without hacking the sources, but want to use
the lexer/formatter/style/filter lookup functions (lexers.get_lexer_by_name
et al.), you can use setuptools entrypoints to add new lexers, formatters
or styles as if they were in the Pygments core.

That means you can use your highlighter modules with the pygmentize script,
which relies on the mentioned functions.

Entrypoints

Here is a list of setuptools entrypoints that Pygments understands:

pygments.lexers

    This entrypoint is used for adding new lexers to the Pygments core.
    The name of the entrypoint values doesn't really matter; Pygments
    extracts the required metadata from the class definition:

    [pygments.lexers]
    yourlexer = yourmodule:YourLexer

    Note that you have to define the name, aliases and filenames attributes
    so that you can use the highlighter from the command line:

    class YourLexer(...):
        name = 'Name Of Your Lexer'
        aliases = ['alias']
        filenames = ['*.ext']

pygments.formatters

    You can use this entrypoint to add new formatters to Pygments. The name
    of an entrypoint item is the name of the formatter. If you prefix the
    name with a slash, it's used as a filename pattern:

    [pygments.formatters]
    yourformatter = yourmodule:YourFormatter
    /.ext = yourmodule:YourFormatter

pygments.styles

    To add a new style you can use this entrypoint. The name of the
    entrypoint is the name of the style:

    [pygments.styles]
    yourstyle = yourmodule:YourStyle

pygments.filters

    Use this entrypoint to register a new filter. The name of the
    entrypoint is the name of the filter:

    [pygments.filters]
    yourfilter = yourmodule:YourFilter

How To Use Entrypoints

This documentation doesn't explain how to use those entrypoints because this
is covered in the setuptools documentation. That page should cover everything
you need to write a plugin.
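For a concrete picture, this is roughly how the [pygments.lexers] entry above
would be declared with setuptools; the module and class names are the
placeholders from this page, not a real package:

from setuptools import setup

setup(
    name='yourmodule',
    py_modules=['yourmodule'],
    entry_points={
        'pygments.lexers': ['yourlexer = yourmodule:YourLexer'],
    },
)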

Extending The Core

If you have written a Pygments plugin that is open source, please inform us
about that. There is a high chance that we'll add it to the Pygments
distribution.

Introduction and Quickstart

Welcome to Pygments! This document explains the basic concepts and terms and
gives a few examples of how to use the library.

Architecture

There are four types of components that work together highlighting a piece
of code:

  • A lexer splits the source into tokens, fragments of the source that have
    a token type that determines what the text represents semantically
    (e.g., keyword, string, or comment). There is a lexer for every language
    or markup format that Pygments supports.
  • The token stream can be piped through filters, which usually modify the
    token types or text fragments, e.g. uppercasing all keywords.
  • A formatter then takes the token stream and writes it to an output file,
    in a format such as HTML, LaTeX or RTF.
  • While writing the output, a style determines how to highlight all the
    different token types. It maps them to attributes like "red and bold".

Example

Here is a small example for highlighting Python code:

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

code = 'print "Hello World"'
print(highlight(code, PythonLexer(), HtmlFormatter()))

which prints something like this:

<div class="highlight">
<pre><span class="k">print</span> <span class="s">&quot;Hello World&quot;</span></pre>
</div>

As you can see, Pygments uses CSS classes (by default, but you can change
that) instead of inline styles in order to avoid outputting redundant style
information over and over. A CSS stylesheet that contains all CSS classes
possibly used in the output can be produced by:

print(HtmlFormatter().get_style_defs('.highlight'))

The argument to get_style_defs() is used as an additional CSS selector:
the output may look like this:

.highlight .k { color: #AA22FF; font-weight: bold }
.highlight .s { color: #BB4444 }
...

Options

The highlight() function supports a fourth argument called outfile; it must
be a file object if given. The formatted output will then be written to
this file instead of being returned as a string.

Lexers and formatters both support options. They are given to them as
keyword arguments, either to the class or to the lookup method:

from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import HtmlFormatter

lexer = get_lexer_by_name("python", stripall=True)
formatter = HtmlFormatter(linenos=True, cssclass="source")
result = highlight(code, lexer, formatter)

This makes the lexer strip all leading and trailing whitespace from the
input (stripall option), lets the formatter output line numbers (linenos
option), and sets the wrapping <div>'s class to source (instead of
highlight).

Important options include:

encoding (for lexers and formatters)
    Since Pygments uses Unicode strings internally, this determines which
    encoding will be used to convert to or from byte strings.

style (for formatters)
    The name of the style to use when writing the output.

For an overview of builtin lexers and formatters and their options, visit
the lexer and formatters lists.

For documentation on filters, see this page.

Lexer and formatter lookup

If you want to look up a built-in lexer by its alias or a filename, you can
use one of the following methods:

>>> from pygments.lexers import (get_lexer_by_name,
...     get_lexer_for_filename, get_lexer_for_mimetype)

>>> get_lexer_by_name('python')
<pygments.lexers.PythonLexer>

>>> get_lexer_for_filename('spam.rb')
<pygments.lexers.RubyLexer>

>>> get_lexer_for_mimetype('text/x-perl')
<pygments.lexers.PerlLexer>

All these functions accept keyword arguments; they will be passed to the
lexer as options.

A similar API is available for formatters: use get_formatter_by_name() and
get_formatter_for_filename() from the pygments.formatters module for this
purpose.

Guessing lexers

If you don't know the content of the file, or you want to highlight a file
whose extension is ambiguous, such as .html (which could contain plain HTML
or some template tags), use these functions:

>>> from pygments.lexers import guess_lexer, guess_lexer_for_filename

>>> guess_lexer('#!/usr/bin/python\nprint "Hello World!"')
<pygments.lexers.PythonLexer>

>>> guess_lexer_for_filename('test.py', 'print "Hello World!"')
<pygments.lexers.PythonLexer>

guess_lexer() passes the given content to the lexer classes' analyse_text()
method and returns the one for which it returns the highest number.

All lexers have two different filename pattern lists: the primary and the
secondary one. The get_lexer_for_filename() function only uses the primary
list, whose entries are supposed to be unique among all lexers.
guess_lexer_for_filename(), however, will first loop through all lexers and
look at the primary and secondary filename patterns if the filename matches.
If only one lexer matches, it is returned, else the guessing mechanism of
guess_lexer() is used with the matching lexers.

As usual, keyword arguments to these functions are given to the created
lexer as options.

Command line usage

You can use Pygments from the command line, using the pygmentize script:

$ pygmentize test.py

will highlight the Python file test.py using ANSI escape sequences
(a.k.a. terminal colors) and print the result to standard output.

To output HTML, use the -f option:

$ pygmentize -f html -o test.html test.py

to write an HTML-highlighted version of test.py to the file test.html.
Note that it will only be a snippet of HTML; if you want a full HTML
document, use the "full" option:

$ pygmentize -f html -O full -o test.html test.py

This will produce a full HTML document with included stylesheet.

A style can be selected with -O style=<name>.
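Options given to -O can be combined in one comma-separated list, so a full
document with a specific style can be produced in a single invocation
(emacs is just one of the builtin style names):

$ pygmentize -f html -O full,style=emacs -o test.html test.py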

If you need a stylesheet for an existing HTML file using Pygments CSS classes, -it can be created with:

-
$ pygmentize -S default -f html > style.css
-
-
-

where default is the style name.

-

More options and tricks and be found in the command line reference.

-
-

Using Pygments in ReST documents

Many Python people use ReST to document their source code, programs,
scripts et cetera. This also means that documentation often includes source
code samples or snippets.

You can easily enable Pygments support for your ReST texts using a custom
directive; this is also how this documentation displays source code.

From Pygments 0.9, the directive is shipped in the distribution as
external/rst-directive.py. You can copy and adapt this code to your liking.

Styles

Pygments comes with some builtin styles that work for both the HTML and
LaTeX formatter.

The builtin styles can be looked up with the get_style_by_name function:

>>> from pygments.styles import get_style_by_name
>>> get_style_by_name('colorful')
<class 'pygments.styles.colorful.ColorfulStyle'>

You can pass a Style class to a formatter as the style option, in the form
of a string:

>>> from pygments.styles import get_style_by_name
>>> from pygments.formatters import HtmlFormatter
>>> HtmlFormatter(style='colorful').style
<class 'pygments.styles.colorful.ColorfulStyle'>

Or you can also import your own style (which must be a subclass of
pygments.style.Style) and pass it to the formatter:

>>> from yourapp.yourmodule import YourStyle
>>> from pygments.formatters import HtmlFormatter
>>> HtmlFormatter(style=YourStyle).style
<class 'yourapp.yourmodule.YourStyle'>

Creating Own Styles

So, how to create a style? All you have to do is to subclass Style and
define some styles:

from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
     Number, Operator, Generic

class YourStyle(Style):
    default_style = ""
    styles = {
        Comment:                'italic #888',
        Keyword:                'bold #005',
        Name:                   '#f00',
        Name.Function:          '#0f0',
        Name.Class:             'bold #0f0',
        String:                 'bg:#eee #111'
    }

That's it. There are just a few rules. When you define a style for Name,
the style automatically also affects Name.Function and so on. If you
defined 'bold' and you don't want boldface for a subtoken, use 'nobold'.

(Philosophy: the styles aren't written in CSS syntax since this way they
can be used for a variety of formatters.)

default_style is the style inherited by all token types.

To make the style usable for Pygments, you must

  • either register it as a plugin (see the plugin docs), or
  • drop it into the styles subpackage of your Pygments distribution, one
    style class per style, where the file name is the style name and the
    class name is StylenameClass. For example, if your style should be
    called "mondrian", name the class MondrianStyle, put it into the file
    mondrian.py and this file into the pygments.styles subpackage directory.
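Following the "mondrian" example above, such a drop-in module would look
roughly like this (a minimal sketch; the token assignments are arbitrary):

# mondrian.py -- placed in the pygments.styles subpackage
from pygments.style import Style
from pygments.token import Comment, Keyword, String

class MondrianStyle(Style):
    default_style = ''
    styles = {
        Comment: 'italic #888',
        Keyword: 'bold #005',
        String:  '#d20',
    }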

Style Rules

Here is a small overview of all allowed styles:

bold
    render text as bold
nobold
    don't render text as bold (to prevent subtokens being highlighted bold)
italic
    render text italic
noitalic
    don't render text as italic
underline
    render text underlined
nounderline
    don't render text underlined
bg:
    transparent background
bg:#000000
    background color (black)
border:
    no border
border:#ffffff
    border color (white)
#ff0000
    text color (red)
noinherit
    don't inherit styles from supertoken

Note that there may not be a space between bg: and the color value, since
the style definition string is split at whitespace. Also, using named
colors is not allowed, since the supported color names vary for different
formatters.

Furthermore, not all lexers might support every style.

Builtin Styles

Pygments ships some builtin styles which are maintained by the Pygments team.

To get a list of known styles you can use this snippet:

>>> from pygments.styles import STYLE_MAP
>>> STYLE_MAP.keys()
['default', 'emacs', 'friendly', 'colorful']

Getting a list of available styles

New in version 0.6.

Because it could be that a plugin registered a style, there is a way to
iterate over all styles:

>>> from pygments.styles import get_all_styles
>>> styles = list(get_all_styles())

Terminal Styles

New in version 2.2.

Custom styles used with the 256-color terminal formatter can also map
colors to the 8 default ANSI colors. To do so, use ansigreen,
ansibrightred or any other colors defined in pygments.style.ansicolors.
Foreground ANSI colors will be mapped to the corresponding escape codes 30
to 37, thus respecting any custom color mapping and themes provided by many
terminal emulators. Light variants are treated as foreground color with an
added bold flag. bg:ansi<color> will also be respected, except the light
variant will be the same shade as its dark variant.

See the following example, where the color of the string "hello world" is
governed by the escape sequence \x1b[34;41;01m (ANSI bright blue, bold, 41
being the red background) instead of an extended foreground & background
color.

>>> from pygments import highlight
>>> from pygments.style import Style
>>> from pygments.token import Token
>>> from pygments.lexers import Python3Lexer
>>> from pygments.formatters import Terminal256Formatter

>>> class MyStyle(Style):
        styles = {
            Token.String:     'ansibrightblue bg:ansibrightred',
        }

>>> code = 'print("Hello World")'
>>> result = highlight(code, Python3Lexer(), Terminal256Formatter(style=MyStyle))
>>> print(result.encode())
b'\x1b[34;41;01m"\x1b[39;49;00m\x1b[34;41;01mHello World\x1b[39;49;00m\x1b[34;41;01m"\x1b[39;49;00m'

Colors specified using ansi* are converted to a default set of RGB colors
when used with formatters other than the terminal-256 formatter.

By definition of ANSI, the following colors are considered "light" colors,
and will be rendered by most terminals as bold:

  • "brightblack" (darkgrey), "brightred", "brightgreen", "brightyellow",
    "brightblue", "brightmagenta", "brightcyan", "white"

The following are considered "dark" colors and will be rendered as non-bold:

  • "black", "red", "green", "yellow", "blue", "magenta", "cyan", "gray"

Exact behavior might depend on the terminal emulator you are using, and its
settings.

Changed in version 2.4.

The definition of the ANSI color names has changed. New names are easier to
understand and align to the colors used in other projects.

New names            Pygments up to 2.3
-------------------  ------------------
ansiblack            #ansiblack
ansired              #ansidarkred
ansigreen            #ansidarkgreen
ansiyellow           #ansibrown
ansiblue             #ansidarkblue
ansimagenta          #ansipurple
ansicyan             #ansiteal
ansigray             #ansilightgray
ansibrightblack      #ansidarkgray
ansibrightred        #ansired
ansibrightgreen      #ansigreen
ansibrightyellow     #ansiyellow
ansibrightblue       #ansiblue
ansibrightmagenta    #ansifuchsia
ansibrightcyan       #ansiturquoise
ansiwhite            #ansiwhite

Old ANSI color names are deprecated but will still work.

Builtin Tokens

In the pygments.token module, there is a special object called Token that
is used to create token types.

You can create a new token type by accessing an attribute of Token:

>>> from pygments.token import Token
>>> Token.String
Token.String
>>> Token.String is Token.String
True

Note that tokens are singletons, so you can use the is operator for
comparing token types.

As of Pygments 0.7 you can also use the in operator to perform set tests:

>>> from pygments.token import Comment
>>> Comment.Single in Comment
True
>>> Comment in Comment.Multi
False

This can be useful in filters and if you write lexers on your own without
using the base lexers.

You can also split a token type into a hierarchy, and get the parent of it:

>>> String.split()
[Token, Token.Literal, Token.Literal.String]
>>> String.parent
Token.Literal

In principle, you can create an unlimited number of token types, but nobody
can guarantee that a style would define style rules for a token type.
Because of that, Pygments proposes some global token types defined in the
pygments.token.STANDARD_TYPES dict.
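That dict maps each proposed token type to the short CSS class name used by
the HTML formatter. A quick interactive check (the exact mapping shown is an
assumption based on Pygments 2.x):

>>> from pygments.token import STANDARD_TYPES, Comment
>>> STANDARD_TYPES[Comment]
'c'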

For some tokens aliases are already defined:

-
>>> from pygments.token import String
->>> String
-Token.Literal.String
-
-
-

Inside the pygments.token module the following aliases are defined:

- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Text

Token.Text

for any type of text data

Whitespace

Token.Text.Whitespace

for specially highlighted whitespace

Error

Token.Error

represents lexer errors

Other

Token.Other

special token for data not -matched by a parser (e.g. HTML -markup in PHP code)

Keyword

Token.Keyword

any kind of keywords

Name

Token.Name

variable/function names

Literal

Token.Literal

Any literals

String

Token.Literal.String

string literals

Number

Token.Literal.Number

number literals

Operator

Token.Operator

operators (+, not…)

Punctuation

Token.Punctuation

punctuation ([, (…)

Comment

Token.Comment

any kind of comments

Generic

Token.Generic

generic tokens (have a look at -the explanation below)

-

The Whitespace token type is new in Pygments 0.8. It is used only by the -VisibleWhitespaceFilter currently.

-

Normally you just create token types using the already defined aliases. For each -of those token aliases, a number of subtypes exists (excluding the special tokens -Token.Text, Token.Error and Token.Other)

-

The is_token_subtype() function in the pygments.token module can be used to -test if a token type is a subtype of another (such as Name.Tag and Name). -(This is the same as Name.Tag in Name. The overloaded in operator was newly -introduced in Pygments 0.7, the function still exists for backwards -compatibility.)

-

With Pygments 0.7, it’s also possible to convert strings to token types (for example -if you want to supply a token from the command line):

-
>>> from pygments.token import String, string_to_tokentype
->>> string_to_tokentype("String")
-Token.Literal.String
->>> string_to_tokentype("Token.Literal.String")
-Token.Literal.String
->>> string_to_tokentype(String)
-Token.Literal.String
-
-
-
-

Keyword Tokens¶

-
-
Keyword

For any kind of keyword (especially if it doesn’t match any of the -subtypes of course).

-
-
Keyword.Constant

For keywords that are constants (e.g. None in future Python versions).

-
-
Keyword.Declaration

For keywords used for variable declaration (e.g. var in some programming -languages like JavaScript).

-
-
Keyword.Namespace

For keywords used for namespace declarations (e.g. import in Python and -Java and package in Java).

-
-
Keyword.Pseudo

For keywords that aren’t really keywords (e.g. None in old Python -versions).

-
-
Keyword.Reserved

For reserved keywords.

-
-
Keyword.Type

For builtin types that can’t be used as identifiers (e.g. int, -char etc. in C).

-
-
-
-
-

Name Tokens¶

-
-
Name

For any name (variable names, function names, classes).

-
-
Name.Attribute

For all attributes (e.g. in HTML tags).

-
-
Name.Builtin

Builtin names; names that are available in the global namespace.

-
-
Name.Builtin.Pseudo

Builtin names that are implicit (e.g. self in Ruby, this in Java).

-
-
Name.Class

Class names. Because no lexer can know if a name is a class or a function -or something else this token is meant for class declarations.

-
-
Name.Constant

Token type for constants. In some languages you can recognise a token by the -way it’s defined (the value after a const keyword for example). In -other languages constants are uppercase by definition (Ruby).

-
-
Name.Decorator

Token type for decorators. Decorators are syntactic elements in the Python -language. Similar syntax elements exist in C# and Java.

-
-
Name.Entity

Token type for special entities. (e.g. &nbsp; in HTML).

-
-
Name.Exception

Token type for exception names (e.g. RuntimeError in Python). Some languages -define exceptions in the function signature (Java). You can highlight -the name of that exception using this token then.

-
-
Name.Function

Token type for function names.

-
-
Name.Function.Magic

same as Name.Function but for special function names that have an implicit use -in a language (e.g. __init__ method in Python).

-
-
Name.Label

Token type for label names (e.g. in languages that support goto).

-
-
Name.Namespace

Token type for namespaces. (e.g. import paths in Java/Python), names following -the module/namespace keyword in other languages.

-
-
Name.Other

Other names. Normally unused.

-
-
Name.Tag

Tag names (in HTML/XML markup or configuration files).

-
-
Name.Variable

Token type for variables. Some languages have prefixes for variable names -(PHP, Ruby, Perl). You can highlight them using this token.

-
-
Name.Variable.Class

same as Name.Variable but for class variables (also static variables).

-
-
Name.Variable.Global

same as Name.Variable but for global variables (used in Ruby, for -example).

-
-
Name.Variable.Instance

same as Name.Variable but for instance variables.

-
-
Name.Variable.Magic

same as Name.Variable but for special variable names that have an implicit use -in a language (e.g. __doc__ in Python).

-
-
-
-
-

Literals¶

-
-
Literal

For any literal (if not further defined).

-
-
Literal.Date

for date literals (e.g. 42d in Boo).

-
-
String

For any string literal.

-
-
String.Affix

Token type for affixes that further specify the type of the string they’re -attached to (e.g. the prefixes r and u8 in r"foo" and u8"foo").

-
-
String.Backtick

Token type for strings enclosed in backticks.

-
-
String.Char

Token type for single characters (e.g. Java, C).

-
-
String.Delimiter

Token type for delimiting identifiers in “heredoc”, raw and other similar -strings (e.g. the word END in Perl code print <<'END';).

-
-
String.Doc

Token type for documentation strings (for example Python).

-
-
String.Double

Double quoted strings.

-
-
String.Escape

Token type for escape sequences in strings.

-
-
String.Heredoc

Token type for “heredoc” strings (e.g. in Ruby or Perl).

-
-
String.Interpol

Token type for interpolated parts in strings (e.g. #{foo} in Ruby).

-
-
String.Other

Token type for any other strings (for example %q{foo} string constructs -in Ruby).

-
-
String.Regex

Token type for regular expression literals (e.g. /foo/ in JavaScript).

-
-
String.Single

Token type for single quoted strings.

-
-
String.Symbol

Token type for symbols (e.g. :foo in LISP or Ruby).

-
-
Number

Token type for any number literal.

-
-
Number.Bin

Token type for binary literals (e.g. 0b101010).

-
-
Number.Float

Token type for float literals (e.g. 42.0).

-
-
Number.Hex

Token type for hexadecimal number literals (e.g. 0xdeadbeef).

-
-
Number.Integer

Token type for integer literals (e.g. 42).

-
-
Number.Integer.Long

Token type for long integer literals (e.g. 42L in Python).

-
-
Number.Oct

Token type for octal literals.

-
-
-
-
-

Operators¶

-
-
Operator

For any punctuation operator (e.g. +, -).

-
-
Operator.Word

For any operator that is a word (e.g. not).

-
-
-
-
-

Punctuation¶

-
-

New in version 0.7.

-
-
-
Punctuation

For any punctuation which is not an operator (e.g. [, (…)

-
-
-
-
-

Comments¶

-
-
Comment

Token type for any comment.

-
-
Comment.Hashbang
-
Token type for hashbang comments (i.e. first lines of files that start with

#!).

-
-
-
-
Comment.Multiline

Token type for multiline comments.

-
-
Comment.Preproc

Token type for preprocessor comments (also <?php/<% constructs).

-
-
Comment.Single

Token type for comments that end at the end of a line (e.g. # foo).

-
-
Comment.Special

Special data in comments. For example code tags, author and license -information, etc.

-
-
-
-
-

Generic Tokens

Generic tokens are for special lexers like the DiffLexer that doesn’t really
highlight a programming language but a patch file.

Generic

A generic, unstyled token. Normally you don’t use this token type.

Generic.Deleted

Marks the token value as deleted.

Generic.Emph

Marks the token value as emphasized.

Generic.Error

Marks the token value as an error message.

Generic.Heading

Marks the token value as a headline.

Generic.Inserted

Marks the token value as inserted.

Generic.Output

Marks the token value as program output (e.g. for the python cli lexer).

Generic.Prompt

Marks the token value as a command prompt (e.g. bash lexer).

Generic.Strong

Marks the token value as bold (e.g. for the rst lexer).

Generic.Subheading

Marks the token value as a subheadline.

Generic.Traceback

Marks the token value as part of an error traceback.
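
All these token types form a hierarchy rooted at pygments.token.Token, and
styles fall back along it: a style that defines a color for a parent type
implicitly covers its subtypes. A minimal sketch of inspecting the hierarchy:

from pygments.token import Comment, String, Token

# Subtypes are "in" their parents; Token is the root of the hierarchy.
assert String.Double in String
assert Comment.Hashbang in Comment
assert String in Token

# split() lists the path from the root down to the type itself.
print(String.Double.split())
# [Token, Token.Literal, Token.Literal.String, Token.Literal.String.Double]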

diff --git a/doc/_build/html/docs/unicode.html b/doc/_build/html/docs/unicode.html
deleted file mode 100644
index 1b8aaf1..0000000
--- a/doc/_build/html/docs/unicode.html
+++ /dev/null

Unicode and Encodings

Since Pygments 0.6, all lexers use unicode strings internally. Because of that
you might encounter the occasional UnicodeDecodeError if you pass strings
with the wrong encoding.

By default all lexers have their input encoding set to guess. This means
that the following encodings are tried:

  • UTF-8 (including BOM handling)
  • The locale encoding (i.e. the result of locale.getpreferredencoding())
  • As a last resort, latin1

If you pass a lexer a byte string object (not unicode), it tries to decode the
data using this encoding.
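
For instance, a minimal sketch (the UTF-8 byte string is just an
illustration):

from pygments.lexers import PythonLexer

# With an explicit input encoding, byte input is decoded before tokenizing.
lexer = PythonLexer(encoding='utf-8')
tokens = list(lexer.get_tokens(b'print("caf\xc3\xa9")\n'))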

You can override the encoding using the encoding or inencoding lexer
options. If you have the chardet library installed and set the encoding to
chardet, it will analyse the text and automatically use the encoding it
thinks is the right one:

from pygments.lexers import PythonLexer
lexer = PythonLexer(encoding='chardet')

The best way is to pass Pygments unicode objects. In that case you can’t get
unexpected output.

The formatters now send Unicode objects to the stream if you don’t set the
output encoding. You can do so by passing the formatters an encoding option:

from pygments.formatters import HtmlFormatter
f = HtmlFormatter(encoding='utf-8')

You will have to set this option if you have non-ASCII characters in the
source and the output stream does not accept Unicode written to it!
This is the case for all regular files and for terminals.

Note: The Terminal formatter tries to be smart: if its output stream has an
encoding attribute, and you haven’t set the option, it will encode any
Unicode string with this encoding before writing it. This is the case for
sys.stdout, for example. The other formatters don’t have that behavior.

Another note: If you call Pygments via the command line (pygmentize),
encoding is handled differently; see the command line docs.

New in version 0.7: The formatters now also accept an outencoding option
which will override the encoding option if given. This makes it possible to
use a single options dict with lexers and formatters, and still have
different input and output encodings.
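
A minimal sketch of that single-options-dict pattern (the latin1 input is
illustrative):

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

# 'encoding' decodes the lexer's input; 'outencoding' overrides 'encoding'
# on the formatter side, so the output is UTF-8 regardless.
options = {'encoding': 'latin1', 'outencoding': 'utf-8'}
html = highlight(b'print("caf\xe9")', PythonLexer(**options),
                 HtmlFormatter(**options))
# 'html' is a UTF-8 encoded byte string.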

diff --git a/doc/_build/html/download.html b/doc/_build/html/download.html
deleted file mode 100644
index ab70e42..0000000
--- a/doc/_build/html/download.html
+++ /dev/null

Download and installation

The current release is version 2.4.2.

Packaged versions

You can download it from the Python Package Index. For installation of packages
from PyPI, we recommend Pip, which works on all major platforms.
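
For example, to install from PyPI:

pip install Pygments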

Under Linux, most distributions include a package for Pygments, usually called
pygments or python-pygments. You can install it with the package
manager as usual.

Development sources

We’re using the Git version control system. You can get the development source
using this command:

git clone https://github.com/pygments/pygments

Development takes place at GitHub.

The latest changes in the development source code are listed in the changelog.

diff --git a/doc/_build/html/faq.html b/doc/_build/html/faq.html
deleted file mode 100644
index 7f164ab..0000000
--- a/doc/_build/html/faq.html
+++ /dev/null

Pygments FAQ

What is Pygments?

Pygments is a syntax highlighting engine written in Python. That means it will
take source code (or other markup) in a supported language and output a
processed version (in different formats) containing syntax highlighting markup.

Its features include:

  • a wide range of common languages and markup formats is supported
  • new languages and formats are added easily
  • a number of output formats is available, including:
      • HTML
      • ANSI sequences (console output)
      • LaTeX
      • RTF
  • it is usable as a command-line tool and as a library
  • parsing and formatting is fast

Pygments is licensed under the BSD license.

Where does the name Pygments come from?

Py of course stands for Python, while pigments are used for coloring paint,
and in this case, source code!

What are the system requirements?

Pygments only needs a standard Python install: version 2.7 or higher for
Python 2, or version 3.5 or higher for Python 3. No additional libraries
are needed.

How can I use Pygments?

Pygments is usable as a command-line tool as well as a library.

From the command line, usage looks like this (assuming the pygmentize script is
properly installed):

pygmentize -f html /path/to/file.py

This will print an HTML-highlighted version of /path/to/file.py to standard output.

For complete help, please run pygmentize -h.

Usage as a library is thoroughly demonstrated in the Documentation section.
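
As a minimal library sketch of the same operation:

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

# The library equivalent of `pygmentize -f html` for a source string.
print(highlight('print("Hello, world!")', PythonLexer(), HtmlFormatter()))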

How do I make a new style?

Please see the documentation on styles.
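
In short, a style is a class mapping token types to style strings. A minimal
sketch (MyStyle is an illustrative name, not something shipped with
Pygments):

from pygments.style import Style
from pygments.token import Comment, Keyword, String

class MyStyle(Style):
    default_style = ''
    styles = {
        Comment: 'italic #888888',
        Keyword: 'bold #005588',
        String:  '#a31515',
    }

A formatter then accepts it directly, e.g. HtmlFormatter(style=MyStyle).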

How can I report a bug or suggest a feature?

Please report bugs and feature wishes in the tracker at GitHub.

You can also e-mail the authors; see the contact details.

I want support for this language!

Instead of waiting for others to include language support, why not write it
yourself? All you have to know is outlined in the docs.

Can I use Pygments for programming language processing?

The Pygments lexing machinery is quite powerful and can be used to build lexers
for basically all languages. However, parsing them is not possible, though some
lexers go some steps in this direction in order to e.g. highlight function names
differently.
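
A minimal sketch of what the lexing machinery produces (a flat token stream,
not a parse tree):

from pygments.lexers import PythonLexer

# Lexing yields (token_type, text) pairs; no syntax tree is built.
for ttype, value in PythonLexer().get_tokens('x = 42\n'):
    print(ttype, repr(value))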

Also, error reporting is not within the scope of Pygments. It focuses on
correctly highlighting syntactically valid documents, not on finding and
compensating for errors.

Who uses Pygments?

This is an (incomplete) list of projects and sites known to use the Pygments highlighter.

  • Wikipedia
  • BitBucket, a Mercurial and Git hosting site
  • The Sphinx documentation builder, for embedded source examples
  • rst2pdf, a reStructuredText to PDF converter
  • Codecov, a code coverage CI service
  • Trac, the universal project management tool
  • AsciiDoc, a text-based documentation generator
  • ActiveState Code, the Python Cookbook successor
  • ViewVC, a web-based version control repository browser
  • BzrFruit, a Bazaar branch viewer
  • QBzr, a cross-platform Qt-based GUI front end for Bazaar
  • Review Board, a collaborative code reviewing tool
  • Diamanda, a Django powered wiki system with support for Pygments
  • Progopedia (English), an encyclopedia of programming languages
  • Bruce, a reStructuredText presentation tool
  • PIDA, a universal IDE written in Python
  • BPython, a curses-based intelligent Python shell
  • PuDB, a console Python debugger
  • XWiki, a wiki-based development framework in Java, using Jython
  • roux, a script for running R scripts and creating beautiful output
    including graphs
  • hurl, a web service for making HTTP requests
  • wxHTMLPygmentizer, a GUI utility used to make code colorization easier
  • Postmarkup, a BBCode to XHTML generator
  • WpPygments and WPygments, highlighter plugins for WordPress
  • Siafoo, a tool for sharing and storing useful code and programming experience
  • D source, a community for the D programming language
  • dpaste.com, another Django pastebin
  • Django snippets, a pastebin for Django code
  • Fayaa, a Chinese pastebin
  • Incollo.com, a free collaborative debugging tool
  • PasteBox, a pastebin focused on privacy
  • hilite.me, a site to highlight code snippets
  • patx.me, a pastebin
  • Fluidic, an experiment in integrating shells with a GUI
  • pygments.rb, a pygments wrapper for Ruby
  • Clygments, a pygments wrapper for Clojure
  • PHPygments, a pygments wrapper for PHP
  • Spyder, the Scientific Python Development Environment, uses pygments for
    the multi-language syntax highlighting in its editor

If you have a project or web site using Pygments, drop me a line, and I’ll add a
link here.

diff --git a/doc/_build/html/genindex.html b/doc/_build/html/genindex.html
deleted file mode 100644
index 9c1ed58..0000000
--- a/doc/_build/html/genindex.html
+++ /dev/null

diff --git a/doc/_build/html/index.html b/doc/_build/html/index.html
deleted file mode 100644
index 80f2259..0000000
--- a/doc/_build/html/index.html
+++ /dev/null

Welcome!

This is the home of Pygments. It is a generic syntax highlighter suitable for
use in code hosting, forums, wikis or other applications that need to prettify
source code. Highlights are:

  • a wide range of over 300 languages and other text formats is supported
  • special attention is paid to details that increase highlighting quality
  • support for new languages and formats is added easily; most languages use a
    simple regex-based lexing mechanism
  • a number of output formats is available, among them HTML, RTF, LaTeX and
    ANSI sequences
  • it is usable as a command-line tool and as a library
  • … and it highlights even Perl 6!

Read more in the FAQ list or the documentation,
or download the latest release.

Contribute

Like every open-source project, we are always looking for volunteers to help us
with programming. Python knowledge is required, but don’t fear: Python is a very
clear and easy-to-learn language.

Development takes place on GitHub.

If you found a bug, just open a ticket in the GitHub tracker. Be sure to log
in to be notified when the issue is fixed – development is not fast-paced as
the library is quite stable. You can also send an e-mail to the developers; see
below.

The authors

Pygments is maintained by Georg Brandl, e-mail address georg@python.org,
and Matthäus Chajdas.

Many lexers and fixes have been contributed by Armin Ronacher, the rest of
the Pocoo team and Tim Hatch.

diff --git a/doc/_build/html/languages.html b/doc/_build/html/languages.html
deleted file mode 100644
index 279da8c..0000000
--- a/doc/_build/html/languages.html
+++ /dev/null

Supported languages

Pygments supports an ever-growing range of languages. Watch this space…

Programming languages

  • ActionScript
  • Ada
  • ANTLR
  • AppleScript
  • Assembly (various)
  • Asymptote
  • Augeas
  • Awk
  • BBC Basic
  • Befunge
  • Boa
  • Boo
  • BrainFuck
  • C, C++
  • C#
  • Charm++ CI
  • Clojure
  • CoffeeScript
  • ColdFusion
  • Common Lisp
  • Coq
  • Cryptol (incl. Literate Cryptol)
  • Crystal
  • Cython
  • D
  • Dart
  • DCPU-16
  • Delphi
  • Dylan
  • Elm
  • Email
  • Erlang
  • Ezhil, a Tamil programming language
  • Factor
  • Fancy
  • Fennel
  • FloScript
  • Fortran
  • FreeFEM++
  • F#
  • GAP
  • Gherkin (Cucumber)
  • GL shaders
  • Groovy
  • Haskell (incl. Literate Haskell)
  • HLSL
  • HSpec
  • IDL
  • Io
  • Java
  • JavaScript
  • Lasso
  • LLVM
  • Logtalk
  • Lua
  • Matlab
  • MiniD
  • Modelica
  • Modula-2
  • MuPad
  • Nemerle
  • Nimrod
  • Notmuch
  • Objective-C
  • Objective-J
  • Octave
  • OCaml
  • PHP
  • Perl 5 and Perl 6
  • Pony
  • PovRay
  • PostScript
  • PowerShell
  • Prolog
  • Python 2.x and 3.x (incl. console sessions and tracebacks)
  • REBOL
  • Red
  • Redcode
  • Ruby (incl. irb sessions)
  • Rust
  • S, S-Plus, R
  • Scala
  • Scdoc
  • Scheme
  • Scilab
  • SGF
  • Slash
  • Slurm
  • Smalltalk
  • SNOBOL
  • Solidity
  • Tcl
  • Tera Term language
  • TOML
  • Vala
  • Verilog
  • VHDL
  • Visual Basic.NET
  • Visual FoxPro
  • XQuery
  • Zeek
  • Zephir
  • Zig

Template languages

  • Cheetah templates
  • Django / Jinja templates
  • ERB (Ruby templating)
  • Genshi (the Trac template language)
  • JSP (Java Server Pages)
  • Myghty (the HTML::Mason based framework)
  • Mako (the Myghty successor)
  • Smarty templates (PHP templating)
  • Tea

Other markup

  • Apache config files
  • Bash shell scripts
  • BBCode
  • CMake
  • CSS
  • Debian control files
  • Diff files
  • DTD
  • Gettext catalogs
  • Gnuplot script
  • Groff markup
  • HTML
  • HTTP sessions
  • INI-style config files
  • IRC logs (irssi style)
  • Lighttpd config files
  • Makefiles
  • MoinMoin/Trac Wiki markup
  • MySQL
  • Nginx config files
  • POV-Ray scenes
  • Ragel
  • Redcode
  • ReST
  • Robot Framework
  • RPM spec files
  • SQL, also MySQL, SQLite
  • Squid configuration
  • TeX
  • tcsh
  • Vim Script
  • Windows batch files
  • XML
  • XSLT
  • YAML

… that’s all?

Well, why not write your own? Contributing to Pygments is easy and fun. Take a
look at the docs on lexer development; a minimal sketch follows below. Pull
requests are welcome on GitHub (https://github.com/pygments/pygments).
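
To give the flavor, a minimal, hypothetical RegexLexer (MyDslLexer and its
rules are illustrative only; the lexer development docs cover the real
machinery):

from pygments.lexer import RegexLexer
from pygments.token import Comment, Keyword, Name, Text

class MyDslLexer(RegexLexer):
    """Lexer for a made-up mini language."""
    name = 'MyDSL'
    aliases = ['mydsl']
    filenames = ['*.mydsl']

    tokens = {
        'root': [
            (r'#.*?$', Comment.Single),       # line comments
            (r'\b(let|if|else)\b', Keyword),  # a few keywords
            (r'[A-Za-z_]\w*', Name),          # identifiers
            (r'\s+', Text),                   # whitespace
        ],
    }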

Note: the languages listed here are supported in the development version. The
latest release may lack a few of them.

diff --git a/doc/_build/html/objects.inv b/doc/_build/html/objects.inv
deleted file mode 100644
index 63b10ba..0000000
Binary files a/doc/_build/html/objects.inv and /dev/null differ

diff --git a/doc/_build/html/py-modindex.html b/doc/_build/html/py-modindex.html
deleted file mode 100644
index 60a287e..0000000
--- a/doc/_build/html/py-modindex.html
+++ /dev/null

Python Module Index

pygments
    pygments.formatter
    pygments.formatters
    pygments.lexer
    pygments.lexers
    pygments.lexers.actionscript
    pygments.lexers.algebra
    pygments.lexers.ambient
    pygments.lexers.ampl
    pygments.lexers.apl
    pygments.lexers.archetype
    pygments.lexers.asm
    pygments.lexers.automation
    pygments.lexers.basic
    pygments.lexers.bibtex
    pygments.lexers.boa
    pygments.lexers.business
    pygments.lexers.c_cpp
    pygments.lexers.c_like
    pygments.lexers.capnproto
    pygments.lexers.chapel
    pygments.lexers.clean
    pygments.lexers.configs
    pygments.lexers.console
    pygments.lexers.crystal
    pygments.lexers.csound
    pygments.lexers.css
    pygments.lexers.d
    pygments.lexers.dalvik
    pygments.lexers.data
    pygments.lexers.diff
    pygments.lexers.dotnet
    pygments.lexers.dsls
    pygments.lexers.dylan
    pygments.lexers.ecl
    pygments.lexers.eiffel
    pygments.lexers.elm
    pygments.lexers.email
    pygments.lexers.erlang
    pygments.lexers.esoteric
    pygments.lexers.ezhil
    pygments.lexers.factor
    pygments.lexers.fantom
    pygments.lexers.felix
    pygments.lexers.floscript
    pygments.lexers.forth
    pygments.lexers.fortran
    pygments.lexers.foxpro
    pygments.lexers.freefem
    pygments.lexers.go
    pygments.lexers.grammar_notation
    pygments.lexers.graph
    pygments.lexers.graphics
    pygments.lexers.haskell
    pygments.lexers.haxe
    pygments.lexers.hdl
    pygments.lexers.hexdump
    pygments.lexers.html
    pygments.lexers.idl
    pygments.lexers.igor
    pygments.lexers.inferno
    pygments.lexers.installers
    pygments.lexers.int_fiction
    pygments.lexers.iolang
    pygments.lexers.j
    pygments.lexers.javascript
    pygments.lexers.julia
    pygments.lexers.jvm
    pygments.lexers.lisp
    pygments.lexers.make
    pygments.lexers.markup
    pygments.lexers.matlab
    pygments.lexers.mime
    pygments.lexers.ml
    pygments.lexers.modeling
    pygments.lexers.modula2
    pygments.lexers.monte
    pygments.lexers.ncl
    pygments.lexers.nimrod
    pygments.lexers.nit
    pygments.lexers.nix
    pygments.lexers.oberon
    pygments.lexers.objective
    pygments.lexers.ooc
    pygments.lexers.parasail
    pygments.lexers.parsers
    pygments.lexers.pascal
    pygments.lexers.pawn
    pygments.lexers.perl
    pygments.lexers.php
    pygments.lexers.pony
    pygments.lexers.praat
    pygments.lexers.prolog
    pygments.lexers.python
    pygments.lexers.qvt
    pygments.lexers.r
    pygments.lexers.rdf
    pygments.lexers.rebol
    pygments.lexers.resource
    pygments.lexers.rnc
    pygments.lexers.roboconf
    pygments.lexers.robotframework
    pygments.lexers.ruby
    pygments.lexers.rust
    pygments.lexers.sas
    pygments.lexers.scdoc
    pygments.lexers.scripting
    pygments.lexers.sgf
    pygments.lexers.shell
    pygments.lexers.slash
    pygments.lexers.smalltalk
    pygments.lexers.smv
    pygments.lexers.snobol
    pygments.lexers.solidity
    pygments.lexers.special
    pygments.lexers.sql
    pygments.lexers.stata
    pygments.lexers.supercollider
    pygments.lexers.tcl
    pygments.lexers.templates
    pygments.lexers.teraterm
    pygments.lexers.testing
    pygments.lexers.textedit
    pygments.lexers.textfmts
    pygments.lexers.theorem
    pygments.lexers.trafficscript
    pygments.lexers.typoscript
    pygments.lexers.unicon
    pygments.lexers.urbi
    pygments.lexers.varnish
    pygments.lexers.verification
    pygments.lexers.webmisc
    pygments.lexers.whiley
    pygments.lexers.x10
    pygments.lexers.xorg
    pygments.lexers.zig
    pygments.styles
    pygments.token
    pygments.util

diff --git a/doc/_build/html/search.html b/doc/_build/html/search.html
deleted file mode 100644
index ab4abe5..0000000
--- a/doc/_build/html/search.html
+++ /dev/null

diff --git a/doc/_build/html/searchindex.js b/doc/_build/html/searchindex.js
deleted file mode 100644
index bba49a9..0000000
--- a/doc/_build/html/searchindex.js
+++ /dev/null
,frame:2,framework:[1,2,21,23],free:[11,12,21],freefem:[1,2,23],freefemlex:12,freepasc:12,freewar:12,friendli:[7,12,17],frit:1,from:[0,1,2,3,4,5,6,7,10,12,13,14,15,16,17,18,19,20],front:21,frt:12,fruiti:2,fsharp:12,fsharplex:12,fsi:12,fulfil:11,full:[2,3,7,8,11,12,15],fulli:2,fulton:1,fun:[12,23],func_name_highlight:12,funcnamehighlight:12,funki:11,further:[7,18],furthermor:[12,17],fusesourc:12,futur:[6,12,18],futurewarn:2,galdino:1,galloi:1,gap:[1,2,12,23],gaplex:12,garg:1,garnotel:1,gas:12,gaslex:12,gautier:1,gave:2,gawk:12,gaynor:1,gdc:12,gemfil:12,gemspec:12,gener:[0,2,5,7,11,17,21,22],genshi:[1,12,23],genshilex:12,genshitext:12,genshitextlex:12,gentoo:2,geo:12,georg:[1,22],gerd:1,gerkin:1,gerwin:1,get:[0,2,5,7,10,11,12,13,18,19,20],get_:2,get_all_filt:5,get_all_lex:[0,12],get_all_styl:[0,17],get_bool_opt:[0,4],get_choice_opt:0,get_formatter_by_nam:[0,15],get_formatter_for_filenam:[0,15],get_int_opt:0,get_lexer_by_nam:[0,2,12,14,15],get_lexer_for_filenam:[0,15],get_lexer_for_mimetyp:[0,15],get_list_opt:0,get_style_by_nam:[0,17],get_style_def:[0,2,3,6,7,15],get_syntax_def:7,get_token:[0,2],get_tokens_unprocess:[0,11],getpreferredencod:19,gettext:[2,12,23],gettextlex:12,gherkin:[1,2,12,23],gherkinlex:12,giedriu:1,gif:[2,7],gifimageformatt:7,gilbert:1,gild:1,git:[2,12,20,21],github:[1,2,8,11,12,20,21,22,23],give:[2,3,5,7,11,15],given:[0,2,3,5,7,11,12,15,19],global:[11,18],glshaderlex:12,glsl:[2,12],glslsrc:12,glue:12,gm2:12,gnu:[2,12],gnumakefil:12,gnuplot:[2,12,23],gnuplotlex:12,gobbl:5,gobblefilt:5,goe:11,goetzmann:1,goj:[1,2],golda:1,golex:12,golo:[2,12],gololex:12,golovizin:1,good:[1,2,6],gooddata:[1,2,12],gooddatacllex:12,googl:[1,2],gordon:1,gosrc:12,goss:1,gosu:[2,12],gosulex:12,gosutemplatelex:12,gotthardt:1,govern:17,gracefulli:2,gradl:12,grai:[6,17],grammar:[11,12],grammar_not:12,graph:21,graphic:7,greater:12,greatli:2,green:17,greg:1,grei:[5,12],groff:[2,12,23],grofflex:12,groovi:[1,2,12,23],groovylex:12,group:11,groupid:10,grow:23,gsp:12,gst:12,gsx:12,guarante:18,guess:[0,2,3,7,12,19],guess_lex:[0,2,15],guess_lexer_for_filenam:[0,15],gui:[12,21],guib:1,guid:[5,12],gvimrc:12,hack:[8,14],hagelberg:1,hahn:1,half:7,haml:[1,2,12],hamllex:12,handl:[0,1,2,7,12,19],handlebar:[2,12],handlebarshtmllex:12,handlebarsj:12,handlebarslex:12,handlecodeblock:12,happen:11,harder:2,harriman:1,harrison:1,has:[0,2,3,4,6,7,9,10,11,12,17,19],hash:2,hashbang:18,haskel:[1,2,23],haskelllex:12,hatch:[1,2,22],have:[0,2,3,4,5,6,7,8,11,12,13,14,15,17,18,19,21,22],haven:[3,19],hax:[1,2],haxelex:12,haxeml:12,hazel:1,hbs:12,hdl:12,hdp:12,hdr:12,head:[3,11,12,18],header:[2,12],headlin:[11,18],headline_callback:11,height:7,hello:[3,10,15,17],help:[0,11,21,22],helper:[2,11],hendershott:1,hendrick:1,herbstzeitlos:2,here:[6,7,9,10,11,14,15,17,21,23],heredoc:[2,18],hermoso:1,hess:1,hex:[2,6,12,18],hexadecim:[6,18],hexcat:12,hexdump:[1,2,12],hexdumplex:12,hierarchi:18,high:[12,14],higher:[7,21],highest:[0,15],highlight:[0,2,3,5,7,9,10,11,12,13,14,15,17,18,21,22],highlightt:7,hilit:21,hint:8,hiram:1,hiremath:1,hiroaki:1,histori:12,hl_color:[2,7],hl_line:[2,7],hlsl:[1,2,12,23],hlsli:12,hlslshaderlex:12,hoelz:1,hogan:1,hold:11,holli:1,home:22,hong:1,horizont:7,horn:1,host:[12,21,22],houghton:1,how:[0,2,6,7,10,11,12,15,16,17],howard:1,howett:1,howev:[2,7,15,21],hpp:12,hrl:12,hsa:12,hsail:[1,2,12],hsaillex:12,hspec:[2,12,23],hspeclex:12,htaccess:12,htdoc:13,htm:[7,11,12],html5:[2,7],html:[0,1,2,3,7,11,13,15,17,18,21,22,23],htmlcheetah:12,htmldjango:12,htmldjangolex:12,htmlformatt:[0,2,6,7,10,15,17,19],htmlgenshilex:12,htm
llexer:[11,12],htmlphplexer:[11,12],htmlsmartylex:12,http:[1,2,8,10,12,20,21,23],httpd:12,httplexer:[2,12],huge:11,human:[0,12],hundr:11,hurl:21,hxml:12,hxmllexer:12,hxsl:12,hxx:12,hyb:12,hybri:[1,2,12],hybrislex:12,hylang:12,hylex:12,hyperlink:7,hypothet:11,hypotheticlex:11,i18n:2,i6t:12,i7x:12,iOS:2,ian:[1,2],icl:12,icn:12,iconlex:12,icu:12,icw:12,idc:12,idea:[2,10],ideal:12,identifi:[0,2,12,18],idl4:12,idl:[1,2,23],idllex:12,idr:12,idri:[1,2,12],idrislex:12,iec:12,ieee:12,ietf:12,iex:12,ignor:[6,7,12],ignorecas:11,igor:[1,2],igorexchang:12,igorlex:12,igorpro:12,ijs:12,imag:[1,2,7,12],image_format:7,image_pad:7,imageformatt:[2,7],img:7,immedi:0,implement:[0,2,6,7,12],implicit:18,imposs:11,improv:[1,2,5,7],inc:[1,12],incl:23,includ:[0,2,3,7,11,13,15,16,19,20,21],inclus:11,incollo:21,incompat:2,incomplet:[2,21],incorrect:2,incorrectli:12,increas:[7,11,22],incred:11,indent:[2,5,7,12],index:[0,7,11,20],indexerror:11,indic:[2,7,11],individu:7,industri:12,inencod:[2,3,19],inf:12,infer:3,inferno:1,infinit:2,influenc:11,info:12,inform6:12,inform6lex:12,inform6templatelex:12,inform7:12,inform7lex:12,inform:[1,2,3,7,11,12,14,15,18],ing:2,inherit:[1,2,11,17],ini:[2,11,12,23],inilex:[11,12],initi:[2,5,6,11],initialis:11,inkpot:2,inlin:[2,6,7,15],inlinestyl:13,ino:12,inozemtsev:1,input:[0,2,3,7,11,12,15,19],inputenc:7,insensit:[0,2],insert:[2,11,18],insid:[7,11,18],instal:[2,3,7,8,11,19,21],instanc:[0,2,4,5,10,11,12,13,17,18],instanti:[0,4,6,11],instead:[2,6,7,11,12,15,17,21],instruct:12,int_fict:12,integ:[0,12,18],integr:[2,7,12,21],intel:[2,12],intellig:21,interact:2,interchang:12,interfac:[0,2,8,12],intern:[4,11,15,19],internet:7,interpol:[2,18],interpret:[0,10],intr:12,introduc:[2,7,11,18],introduct:8,invalid:2,invari:11,invoc:7,iok:[1,2,12],iokelex:12,iokesrc:12,iolang:12,iolex:12,iosrc:12,ipf:12,ipython:2,irb:[12,23],irc:[2,7,12,23],ircformatt:7,irclog:12,irclogslex:12,irssi:[12,23],is_token_subtyp:18,isabel:[1,2,12],isabellelex:12,isn:[2,5,6,11,12],iso:12,issu:[2,22],ital:[6,7,12,17],italic:7,item:[6,11,14],iter:[0,2,5,6,7,11,17],itoh:1,its:[0,2,11,12,15,17,19,21],itself:11,ivan:1,jackson:1,jade:[1,12],jag:[1,2,12],jagslex:12,jame:1,jan:2,jann:1,januari:12,jar:10,jarrett:[1,2],jasmin:[1,2,12],jasminlex:12,jasminxt:12,java:[2,8,12,18,21,23],javalex:12,javascript:[2,7,11,18,23],javascriptdjangolex:12,javascripterblex:12,javascriptgenshilex:12,javascriptlex:[2,11,12],javascriptphplex:12,javascriptsmartylex:12,jbst:[1,2,12],jcl:[1,2,12],jcllexer:12,jeffrei:1,jeremi:[1,12],jerith:12,jerom:1,jesper:1,jinja2:2,jinja:[12,23],jlcon:12,jlexer:12,job:12,jochen:1,joe:1,joerg:1,john:1,join:[2,6],jon:1,jona:1,jordi:1,jpeg:7,jpg:[2,7],jpgimageformatt:7,jproperti:12,jsgf:[1,2,12],jsgflexer:12,jsm:12,json:[1,2,12],jsonbareobjectlex:12,jsonld:12,jsonldlex:12,jsonlex:12,jsonml:12,jsp:[12,23],jspeech:12,jsplexer:12,julia:[1,2],juliaconsolelex:12,julialex:12,jun:2,just:[2,6,7,11,12,17,18,22],justin:1,juttl:12,juttlelex:12,jython:[2,10,21],kaarsemak:[1,2],kabak:1,kal:[1,2,12],kallex:12,kalnitski:1,kashif:1,kconfig:[1,2,12],kconfiglex:12,keep:[2,7],kei:[0,6,11,12,17],ken:[1,12],kept:11,kernel:12,keyboardinterrupt:2,keyword:[0,2,4,5,6,12,15,17],keywordcas:[3,5],keywordcasefilt:5,kid:12,kif:12,kind:[2,18],kiril:[1,2],kirk:[1,2],kit:1,kki:12,klein:1,knibb:1,know:[2,15,18,21],knowledg:22,known:[0,3,5,11,17,21],koka:[2,12],kokalex:12,koltsov:1,konrad:1,koprowski:1,korean:12,kotlin:[1,2,12],kotlinlex:12,kowarsch:1,krekel:1,kriegisch:1,kristian:1,krzysiek:[1,2],kschutt:12,ksh:12,kubica:[1,2],kumar:1,kupperschmidt:1,kurt:1,k
urzbach:1,label:[2,18],lack:23,lagda:12,lambda:2,lang_builtin:11,languag:[0,2,11,15,18,22],language_lex:11,larger:[7,13],larim:1,lasso:[1,2,12,23],lassocsslex:12,lassohtmllex:12,lassojavascriptlex:12,lassolex:12,lassoscript:12,lassoxmllex:12,lassu:1,last:[3,6,11,12,19],lasttyp:6,lastval:6,later:[6,7,11,12],latest:[20,22,23],latex:[1,2,7,12,15,17,21,22],latexformatt:[2,6,7],latin1:[2,3,19],latin:12,latter:[2,6,7],laurent:1,layman:1,layout:12,lazi:12,lcry:12,lcryptol:12,lead:[2,11,12,15],leaf:1,lean:[2,12],leanlex:12,learn:22,ledru:1,lee:1,left:[6,12],length:[7,11,12],lenient:12,less:[2,4,12],lesscss:1,lesscsslex:12,lessfilt:2,let:[6,12,15],letter:[5,7,12],level:12,lex:[0,2,11,12,21,22],lexem:12,lexer:[1,2,4,5,6,7,8,10,13,14,17,18,19,21,22,23],lexercontext:11,lexernam:0,lgt:12,lhaskel:12,lhs:12,librari:[2,6,7,9,12,15,19,21,22],licens:[2,18,21],lid:[2,12],lidr:12,lidri:12,life:12,light:[6,7,17],lightbg:7,lighti:12,lighttpd:[2,12,23],lighttpdconflex:12,like:[0,2,3,6,7,9,10,11,15,16,18,21,22],limbo:[1,2,12],limbolex:12,linden:12,line:[0,2,5,7,8,11,12,14,18,19,21,22],line_numb:7,line_number_bg:7,line_number_bold:7,line_number_char:7,line_number_fg:7,line_number_ital:7,line_number_pad:7,line_number_separ:[2,7],line_number_start:[2,7],line_number_step:7,line_pad:7,lineanchor:[2,7],lineno:[2,3,7,15],linenospeci:7,linenostart:7,linenostep:7,linenumb:7,linesepar:7,linespan:[2,7],linewis:12,linh:1,link:[2,7,12,21],linux:[12,20],liquid:[2,12],liquidlex:12,lisp:[1,2,12,18,23],list:[0,2,3,5,7,8,12,14,15,20,21,22,23],listen:2,liter:[1,2,6,7,11,12,23],literateagdalex:12,literatecryptollex:12,literatehaskelllex:12,literateidrislex:12,litstyl:12,littl:[2,12],live:12,livescript:[1,2,12],livescriptlex:12,llvm:[2,12,23],llvmlexer:12,load:[0,1,2,3,11,12],load_formatter_from_fil:[0,2],load_lexer_from_fil:[0,2,11],local:[2,3,11,19],locat:2,log:[1,2,12,22,23],logo:[1,2,12],logoslex:12,logtalk:[1,2,12,23],logtalklex:12,longer:[2,11],longest:11,longnam:0,look:[0,3,5,7,10,11,12,15,17,18,21,22,23],lookup:[0,11,14],loop:[2,15],lorentz:1,lot:[2,10,11],loui:1,lovelac:[1,2],lower:5,lowercas:[5,12],lsl:[2,12],lsllexer:12,lsp:12,lua:[1,2,12,23],lualex:[11,12],lubomir:1,luca:1,luka:1,m2iso:12,m2pim:12,m2r10:12,mabei:1,mac:[2,7,12],macarthur:1,machineri:21,macro:[1,6,7],made:[2,11],magenta:17,magic:[2,18],mai:[0,2,3,5,7,11,12,15,17,23],mail:[21,22],main:[3,7,11,12],mainfram:12,mainli:12,maintain:[1,17,22],major:[1,2,20],mak:12,make:[2,4,7,10,11,12,15,17,19],makefil:[2,23],makefilelex:12,mako:[1,2,12,23],makocsslex:12,makohtmllex:12,makojavascriptlex:12,makolex:12,makoxmllex:12,malform:12,malzeug:2,manag:[12,20,21],mandatori:0,mandel:1,mani:[1,2,7,11,16,17,22],manpag:[2,12],manual:12,mao:12,map:[2,7,11,12,13,15,17],mapfil:11,maql:[1,12],maqllex:12,mar:2,marchand:1,marek:[1,2],margaritelli:1,margin:7,mario:1,mark:[1,5,11,18],markdown:[1,2,12],markdownlex:12,marker:12,markup:[2,6,7,13,15,18,21],martin:1,mask:12,maskj:[1,2],masklex:12,mason:[1,2,12,23],masonlex:12,master:12,match:[0,2,11,12,15,18],math:[2,7,12],mathematica:[2,12],mathematicalex:12,mathescap:7,matlab:[1,2,23],matlablex:12,matlabsess:12,matlabsessionlex:12,matt:[1,2],matteo:1,matter:14,matthew:1,matthia:[1,12],mauricio:1,maven:10,mawk:12,max:[1,12],maxim:12,maximum:11,mayb:12,mcdonald:[1,2],mcgregor:1,mckamei:1,mckee:1,mckenna:1,mclaughlin:1,mean:[0,2,3,5,7,11,12,14,16,19,21],meant:18,mechan:[0,7,15,22],media:2,member:[11,12],menlo:7,mention:14,menu:9,menuconfig:12,mercuri:21,merg:[5,11],messag:[12,18],meta:[7,12],metacharact:11,metaclass:[0,6],metadata:14,metamodel:
12,method:[0,2,3,4,5,6,7,11,12,15,18],meuser:1,mher:1,mhtml:12,michael:1,michiel:1,micro:12,microsoft:7,middl:5,might:[6,11,12,17,19],miikka:1,mike:1,miller:1,mime:[0,2],mimelex:12,mimetyp:[0,2,12],minhe:1,minid:[1,2,12,23],minidlex:12,minidsrc:12,minim:[6,12],minimum:2,minor:2,mior:1,mirc:1,misc:2,misdetect:2,mishandl:2,mishighlight:2,miss:[2,11,12],mit:12,mitchen:1,mix:12,mixtur:12,mli:12,mll:12,mly:12,mma:12,mod:12,mode:[2,7,12],modelica:[1,2,12,23],modelicalex:12,modelin:[1,2],modelvers:10,modif:11,modifi:[0,7,15],modul:[0,2,4,5,7,11,12,14,15,18],modula2:12,modula2lex:12,modula:[1,2,23],modulo:12,mof:12,moin:[2,12,13],moinmoin:[2,8,12,23],moinwikilex:12,mondrian:17,mondrianstyl:17,monkei:[2,12],monkeylex:12,mono:[2,7],monofont:7,monokai:[1,2],monospac:7,mont:[1,2],montelex:12,moo:12,moocod:[1,2,12],moocodelex:12,moon:12,moonscript:[1,2,12],moonscriptlex:12,morai:1,more:[2,3,6,7,10,11,12,15,22],morton:1,most:[0,2,6,7,11,12,17,20,22],moura:1,move:12,movsisyan:1,mozhashpreproc:12,mozilla:[2,12],mozpercentpreproc:12,mozpreproc:12,mozpreproccsslex:12,mozpreprochashlex:12,mozpreprocjavascriptlex:12,mozpreprocpercentlex:12,mozpreprocxullex:12,mq4:12,mq5:12,mqh:12,mql4:12,mql5:12,mql:[2,12],mqllexer:12,msc:12,mscgen:[1,2,12],mscgenlex:12,msdo:[1,2,12],msdossessionlex:12,much:[2,11,12],muhamedag:1,mulitpart:12,multi:[2,11,18,21],multilin:[2,11,18],multipart:12,multipl:[2,7,12],mupad:[1,2,12,23],mupadlex:12,must:[0,3,5,6,7,11,12,15,17],muthiah:1,mxml:[1,2],mxmllexer:12,myghti:[1,2,12,23],myghtycsslex:12,myghtyhtmllex:12,myghtyjavascriptlex:12,myghtylex:12,myghtyxmllex:12,mygthi:12,mylex:11,mynewlex:11,mypythonlex:11,mysql:[2,12,23],mysqllex:12,mystyl:17,myt:12,nafu:7,nam:1,name:[0,2,4,5,7,11,12,13,14,15,17],namehighlightfilt:[2,5],namespac:[1,2,18],nasm:[2,12],nasmlex:[2,12],nasmobjdumplex:12,nathan:1,nativ:[7,12],naveen:1,nawk:12,nbp:12,nbsp:18,ncar:2,ncl:12,ncllexer:12,nearest:7,nearli:2,necessari:7,need:[0,2,4,6,7,11,12,14,15,21,22],needl:11,neg:12,nelson:[1,2],nemerl:[1,2,12,23],nemerlelex:12,neo4j:12,nesc:[1,2,12],nesclex:12,nescsrc:12,nest:[2,11,12],net:[2,23],neufeld:1,neujahr:2,never:11,nevertheless:11,new_stat:11,newest:2,newli:[11,18],newlin:[2,5,7,11,12],newlisp:[2,12],newlisplex:12,newspeak:[2,12],newspeaklanguag:12,newspeaklex:12,next:[11,12],ng2:12,nginx:[2,12,23],nginxconflex:[2,12],nguyen:1,nick:1,nil:[1,12],nimrod:[1,2,23],nimrodlex:12,nit:[1,2],nitlex:12,nix:[1,2,7],nixlex:12,nobackground:7,nobodi:18,nobold:17,noclass:[2,7],noclobber_cssfil:[2,7],noehr:1,noinherit:17,noital:17,nolta:1,non:[2,3,7,11,17,19],none:[0,4,6,7,11,12,18],nonempti:7,nontermin:12,nonzero:5,normal:[0,3,4,5,11,12,18],norman:1,north:1,nose:2,notabl:12,note:[4,5,7,11,12,14,15,17,18,19,23],notebook:2,noth:11,notifi:22,notmuch:[2,12,23],notmuchlex:12,nounderlin:17,nov:2,now:[2,7,11,12,19],nowrap:7,nprint:15,nqp:12,nresult:10,nroff:12,ns2:12,nsh:12,nsi:[2,12],nsislex:12,nth:7,nullformatt:[6,7],number:[2,3,5,6,7,11,12,15,17,18,21,22],numer:[2,12],numpi:[1,2,12],numpylex:12,nusmv:[1,2,12],nusmvlex:12,nvidia:12,obei:7,obj:12,objc:[2,12],objdumb:12,objdump:[2,12],objdumplex:12,object:[0,1,2,5,6,7,11,15,18,19,23],objectivec:[1,2,12],objectiveclex:12,objectivecpplex:12,objectivej:12,objectivejlex:12,objectpasc:12,objj:12,objm2:12,obrist:1,obviou:11,ocaml:[2,12,23],ocamllex:12,occasion:19,occur:11,oct:[2,18],octal:18,octav:[1,2,12,23],octavelex:12,odbc:12,odd:2,odin:12,odinlex:12,off:[0,5,12],offload:2,offset:7,often:[11,16],old:[2,17,18],oldhtmlformatt:6,oleh:1,oliva:1,olivi:1,olov:1,omg:12,omit:3,onclick:7,one:[0,2,3,
5,6,7,11,12,15,17,19],ones:[0,11,12],onli:[0,2,3,7,11,12,15,18,21],onto:11,ooc:2,ooclex:12,opa:[1,2,12],opalang:12,opalex:12,open:[6,11,14,22],openbug:12,opencobol:[1,2,12],openedg:[1,2,12],openedgelex:12,opengl:12,openingcommentdelim:12,oper:[1,2,11,17],optim:[2,11],option:[1,2,4,5,6,9,11,12,13,17,19],optionerror:0,optnam:0,orc:12,orchestra:12,order:[2,12,15,21],ordinari:12,org:[1,10,12,22],origin:[2,12],other:[1,2,7,11,13,17,18,19,21,22],otherlex:11,otherst:11,otherwis:[0,7,11,12],out:[2,3,5,10],outencod:[2,3,7,19],outfil:[0,6,7,15],outlin:21,output:[0,2,3,4,5,6,7,11,13,15,18,19,21,22],outsid:[11,12],over:[0,2,6,7,11,15,17,22],overhaul:1,overload:18,overrid:[0,2,3,4,6,7,11,12,19],overridden:0,overview:[15,17],overwrit:6,overwritten:7,owen:1,own:[0,5,7,8,18,23],oxford:1,ozarowski:2,p6l:12,p6m:12,pace:22,pacemak:12,packag:[2,7,11,18],pacman:12,pacmanconf:12,pacmanconflex:12,pad:7,page:[0,2,7,10,11,13,14,15,23],paid:22,paint:21,pair:[0,11],pan:[2,12],panlex:12,pannuto:1,paper:12,paramet:[6,11],parasail:[1,2],parasaillex:12,paren:2,parent:[6,11,18],parenthesi:11,paris:1,pars:[2,6,12,21],parser:[2,13,18],part:[6,11,12,18],partial:2,particular:[3,7,12],partner:12,pas:[3,12],pascal:[2,3,5],pass:[2,3,4,6,7,11,12,15,17,19],past:[7,12],pastebin:[12,21],pastebox:21,pat:1,patch:[1,2,18],path:[7,11,12,18,21],patrick:1,pattern:[0,2,7,11,12,14,15],patx:21,paul:1,paulo:1,pawn:2,pawnlex:12,pcmk:12,pdf:[12,21],peculiar:2,pekka:1,peopl:16,pep:2,pepijn:1,per:[2,3,12,17,19],percent:12,perfect:7,perform:[2,11,12,18],perl6:12,perl6lex:12,perl:[1,2,15,18,22,23],perllex:[12,15],permit:12,persist:12,pete:1,peterson:1,pfannschmidt:1,pgsql:12,phil:1,philosophi:17,php3:12,php4:12,php5:12,php:[1,2,9,11,18,21,23],phpbb:7,phplexer:[11,12],phpygment:[9,21],phtml:12,picheta:1,pick:7,pida:21,piec:[7,15],pierr:[1,2],pig:[1,2,12],piglex:12,pigment:21,pike:[1,2,12],pikelex:12,pil:[2,7],pilcrow:5,pim:12,pinkham:1,piotr:2,pip:[2,20],pipe:[12,15],pixel:7,pixmap:7,pkg:12,pkg_resourc:2,pkgbuild:12,pkgconfig:12,pkgconfiglex:12,pl6:12,place:[2,12,20,22],plain:[2,12,15],platform:[12,20,21],player:12,pleas:[7,8,14,21],plot:2,plpgsql:12,plpgsqllexer:12,plrm:12,plt:12,plu:[2,7,11,12,23],plugin:[8,12,13,17,21],pm6:12,pmod:12,png:[2,7],pocoo:22,pod:2,point:[5,7,11],polici:12,pom:10,poni:[2,23],ponylex:12,pop:[2,11],popular:12,port:2,pos:11,posh:12,posit:[0,2,11],posix:12,possibl:[2,6,7,10,11,12,15,18,19,21],post:7,postgr:12,postgresconsolelex:12,postgreslex:12,postgresql:[1,2,12],postmarkup:21,postscr:12,postscript:[1,2,12,23],postscriptlex:12,pot:12,pov:[2,12,23],povrai:[12,23],povraylex:12,power:[11,21],powershel:[1,2,12,23],powershelllex:12,powershellsessionlex:12,praat:[1,2],praatlex:12,pre:[2,6,7,10,12,15],preambl:7,preced:[2,11],prefer:11,prefix:[2,7,11,12,14,18],preimplement:11,prepar:11,prepend:[3,7],preproc:18,preprocess:12,preprocessor:[2,9,11,12,18],present:[2,12,21],preserv:7,prestyl:[2,7],prettifi:22,prevent:[11,17],previou:11,previous:2,prg:12,primari:[12,15],primit:12,principl:18,print:[3,7,10,12,15,17,18,21],printabl:12,println:10,prioriti:2,privaci:21,pro:[1,2],probabl:[0,4,12],problem:[2,7],proc:12,procedur:[2,11,12],process:[2,3,11],processor:[1,2,9],produc:[0,2,7,11,12,15],profil:12,progopedia:21,program:[2,16,18,22],progress:12,project:[2,7,10,17,21,22],prolog:[2,23],prologlex:12,prompt:[2,12,18],proof:12,proper:11,properli:[2,21],properti:[2,7,12],propertieslex:12,propos:18,proprietari:12,proto:2,protobuf:12,protobuflex:12,protocol:[1,2,12],prototyp:12,prover:12,provid:[0,2,3,6,7,9,11,12,17],prs:12,prynn:1,prypin:1,p
s1:12,ps1con:12,psc:12,pseudo:[11,12,18],psi:12,psl:12,psm1:12,psql:12,pth:2,publicli:11,publish:12,pudb:21,pug:12,puglex:12,pull:[2,23],pumbaa80:2,punctuat:[2,12],puppet:[1,2,12],puppetlex:12,pure:12,purpos:[6,12,15],push:11,put:[7,11,12,13,17],pwn:12,pxd:12,pxi:12,py2:12,py2tb:12,py3:12,py3tb:12,pybtex:12,pycon:12,pygment:[1,3,4,5,6,7,11,14,15,17,18,19,20,22,23],pykleur:2,pypi:[1,2,7,12,20],pypylog:12,pypyloglex:12,pyrex:12,pytb:12,python2:12,python2lex:[2,12],python2tracebacklex:12,python3:[2,12],python3lex:[2,12,17],python3tracebacklex:[2,12],python:[1,2,3,7,9,10,11,13,15,16,18,20,21,22,23],pythonconsolelex:[2,12],pythoninterpret:10,pythonlex:[2,5,10,11,12,15,19],pythontracebacklex:[2,12],pyw:12,pyx:12,qbasic:[2,12],qbasiclex:12,qbs:12,qbzr:21,qdeclarativeintroduct:12,qml:[1,2,12],qmllexer:12,qualifi:12,qualiti:22,quickstart:[2,8],quit:[2,4,7,21,22],quot:[2,3,10,15,18],quotat:5,qvt:[1,2],qvto:12,qvtolex:12,r10:12,r5r:12,rabel:1,racket:[1,2,12],racketlex:12,ragel:[1,2,12,23],ragelclex:12,ragelcpplex:12,rageldlex:12,ragelembeddedlex:12,rageljavalex:12,ragellex:12,ragelobjectiveclex:12,ragelrubylex:12,rai:[2,23],raichoo:1,rainbow:[1,2],rais:[0,2,5,7,12],raiseonerror:5,raiseonerrortokenfilt:[2,5],rake:12,rakefil:12,rang:[0,12,21,22,23],rare:2,rasul:1,rather:11,raw:[2,7,18],rawtokenformatt:[2,7,12],rawtokenlex:[7,12],raytrac:12,rbcon:12,rbnf:12,rbw:12,rbx:12,rconsol:12,rconsolelex:12,rdlexer:12,rdoc:2,read:[7,11,12,22],readabl:0,readili:6,realli:[7,11,12,14,18],reason:12,reb:12,rebol:[1,2,23],rebollex:12,rebuild:11,receiv:2,recent:12,recogn:[0,2,3,11,12],recognis:[7,12,18],recognit:2,recommend:[7,20],record:12,recreat:12,recurs:11,recurss:12,red:[2,6,7,12,15,17,23],redcod:[1,2,12,23],redcodelex:12,redlex:12,reduc:2,redund:15,reed:1,refactor:1,refer:[2,4,6,7,8,11,12,15],reg:12,regard:3,regedit:12,regeditlex:12,regex:[2,18,22],regist:[0,2,4,5,8,12,13,17],registri:[2,12],regress:2,regular:[2,7,11,18,19],reidi:1,rel:[0,3,7,11],relas:2,relaxng:12,releas:[2,20,22,23],reli:14,remov:[2,7,11],renam:[12,13],render:[2,7,9,11,12,17],renviron:12,repeat:11,repl:12,replac:[2,5,7,11],report:12,repositori:21,repr:7,repres:[15,18],represent:7,request:[2,7,21,23],requir:[2,11,12,14,22],requiredelimit:12,reserv:[12,18],resolv:11,resort:[3,19],resourcebundl:[2,12],resourcelex:12,respect:[2,7,17],respons:13,rest:[2,5,8,11,12,22,23],restrict:12,restructur:2,restructuredtext:[2,12,21],result:[0,2,7,10,11,12,15,17,19],retain:2,reuben:1,review:21,revis:12,rewrit:[1,2],rewritten:12,rewrot:2,rex:12,rexx:[1,2,12],rexxlex:12,rfc822:12,rgb:17,rhistori:12,rhtml:12,rhtmllexer:12,richard:1,richardson:1,right:[5,19],rigor:12,rintel:1,risc:12,rkt:12,rktd:12,rktl:12,rnc:12,rnccompactlex:12,rng:12,rob:1,roberg:[1,2],robert:1,roboconf:[1,2],roboconfgraphlex:12,roboconfinstanceslex:12,robot:[1,2,23],robotframework:12,robotframeworklex:12,roff:12,rolling:1,roman:2,ronach:[1,22],ronni:1,roo:1,root:[7,11,12],root_lex:11,rostyslav:1,roughli:7,rout:12,roux:21,row:7,rpf:12,rpm:[2,12,23],rpmspeclex:12,rprofil:12,rql:[2,12],rqllexer:12,rrggbb:6,rrt:2,rsl:[1,2,12],rsllexer:12,rss:12,rst2pdf:21,rst:[12,16,18],rstlexer:12,rtf:[1,2,7,12,15,21,22],rtfformatt:7,rts:12,rtslexer:12,rubi:[1,2,9,11,18,21,23],rubiniu:12,rubyconsolelex:[2,12],rubylex:[12,15],rudolph:1,ruggier:1,rule:[2,3,7,11,12,13,18],run:[0,7,10,11,12,21],runtim:10,runtimeerror:[7,18],rust:[1,2,23],rustlex:12,rvt:12,sage:12,salminen:1,salt:12,saltstack:12,sam:1,same:[2,3,5,6,7,11,12,17,18],sampl:[9,11,13,16],samplemanag:12,san:[2,7],sandalski:1,sane:7,sap:12,sarl:12,sarllex:12,sas:
12,saslex:12,sass:[1,2,12],sasslex:12,sasso:1,save:[2,11],sbatch:12,sbl:12,scala:[1,2,12,23],scalalex:12,scalat:12,scaml:[1,12],scamllex:12,scd:12,scdoc:[2,23],scdoclex:12,sce:12,scenario:8,scene:[2,23],schafer:1,schemaloc:10,scheme:[1,2,12,23],schemelex:12,schutt:[1,12],schwaiger:1,schweizer:1,schweyer:1,sci:12,scientif:[12,21],scilab:[1,2,12,23],scilablex:12,scm:12,sco:12,sconscript:12,sconstruct:12,scope:[10,21],score:[2,12],screen:13,script:[2,3,11,14,15,16,21,23],scss:[2,12],scsslexer:12,search:[2,11],sebastian:1,second:[2,11,12],secondari:15,section:[11,21],see:[0,2,3,5,6,7,11,12,15,17,19,21,22],seem:11,seen:3,sel4:12,select:[0,2,3,7,11,12,15],selector:[2,3,7,15],self:[0,4,6,7,11,12,18],semant:15,semicolon:2,send:[6,11,12,19,22],sensit:2,sep:2,separ:[2,3,7,11,12],sequenc:[0,2,7,15,17,18,21,22],sequenti:12,serial:12,server:[2,12,23],servic:[1,21],session:[1,2,23],set:[2,6,7,9,10,11,12,13,15,17,18,19],setup:3,setuptool:14,sever:[2,11,12],sgf:[1,2,23],shade:17,shader:[12,23],shadow:12,shape:12,share:21,shaw:1,sheet:[3,12],shell:[1,2,3,21,23],shellscript:12,shellsess:[1,12],shellsessionlex:2,shen:[1,2,12],shenlex:12,shex:12,shexc:12,shexclex:12,ship:[6,9,11,13,16,17],shorten:12,should:[0,2,5,6,7,11,12,13,14,17],shouldn:[2,11],show:[2,7,11,12],shown:[7,11],siafoo:21,sieker:1,sig:12,sign:[2,3,5,11],signatur:18,sil:12,silent:2,silver:[2,12],silverlex:12,similar:[2,11,15,18],simmon:1,simon:1,simonov:[1,2],simpl:[2,9,10,11,22],simplefilt:4,simpli:11,simplifi:2,simplist:12,simpson:1,sinc:[0,2,3,4,7,9,10,11,12,15,17,19],sing:1,singl:[2,11,18,19],singlelin:11,singleton:18,sircmpwn:12,site:21,size:[6,7],skip:11,skylark:2,slash:[1,2,11,14,23],slashlex:12,slexer:[2,12],slightli:2,slim:[2,12],slimlex:12,slowdown:12,slowish:2,sls:12,slurm:[2,12,23],slurmbashlex:12,smali:[1,2,12],smalilex:12,small:[2,12,15,17],smaller:2,smalltalk:[1,2,23],smalltalklex:12,smart:[3,19],smarter:2,smartgameformatlex:12,smarti:[12,23],smartylex:12,smishlajev:1,sml:12,smllexer:[11,12],snapshot:10,snippet:[12,15,16,17,21],snobol4:12,snobol:[1,2,23],snobollex:12,snowbal:[1,2,12],snowballlex:12,softwar:[1,12],sol:12,solar:[1,2],solid:[2,23],soliditylex:12,solvabl:11,some:[0,2,4,6,7,11,12,15,17,18,21],somelex:[3,11],someth:[6,10,11,15,18],sometim:[7,11],somewhat:12,sound:11,sourc:[1,2,5,7,9,11,13,14,15,16,19,21,22],sourcecod:[2,7,12,16],sourcepawn:[1,2,12],sourcepawnlex:12,sourceslist:12,sourceslistlex:12,space:[2,3,5,7,11,12,17,23],spacehack:7,spam:[11,15],span:[2,3,7,10,15],sparql:[1,2,12],sparqllex:12,spec:[2,12,23],special:[2,3,5,7,11,18,22],specif:[2,3,7,11],specifi:[0,3,6,7,11,12,17,18],speed:2,spell:11,sphinx:[2,21],spigarelli:1,spitfir:[2,12],split:[0,2,3,12,15,17,18],splitlin:2,splu:12,spt:12,spyder:21,sql:[1,2,23],sqlite3:[2,12],sqlite:23,sqliteconsolelex:12,sqllexer:12,squeak:12,squid:[1,2,12,23],squidconf:12,squidconflex:12,squiggli:2,src:12,ssp:[2,12],ssplexer:12,stabl:22,stack:[2,11],stan:[1,2,12],stand:21,standalon:10,standard:[0,1,2,3,6,7,12,15,21],standard_typ:18,standardml:12,stanlex:12,stap:1,star:11,starlark:2,start:[0,2,6,7,8,11,12,18],starter:11,startinlin:12,stata:[1,2],statalex:12,state1:11,state2:11,state:[2,12],statement:[0,2,12],staticmethod:0,statist:2,statu:12,stdin:12,stdout:[3,19],stefan:[1,12],step:[7,11,21],stepan:1,stephen:1,steve:1,steven:1,still:[2,7,11,12,17,18,19],stingrai:12,stolen:12,store:[7,11,12,21],stou:1,strachan:1,straightforward:6,strang:11,stream:[0,2,3,4,5,6,7,12,15,19],strict:2,string:[0,2,3,5,6,7,10,11,12,15,17,18,19],string_to_tokentyp:18,stringio:7,strip:[11,12,15],stripal:[0,
12,15],stripnl:[0,2,12],strong:18,strongli:12,structur:[2,12],stuart:1,studio:[2,12],stuff:7,style:[0,1,2,5,7,8,11,14,15,18,23],style_map:[0,17],stylebegin:6,styleend:6,styleguid:5,stylemap:6,stylenameclass:17,stylesheet:[2,3,7,13,15],styleshet:12,subclass:[0,2,7,12,17],subdirectori:13,subhead:[11,18],subheadlin:18,submit:8,subpackag:[2,17],subsequ:[0,7,12],subset:12,subsystem:12,subtoken:17,subtyp:18,successor:[21,23],suffix:[2,11],suggest:8,suit:[1,2,11],suitabl:[0,2,7,22],sullivan:1,supercollid:[1,2],supercolliderlex:12,superset:12,supertoken:17,suppli:[12,18],support:[1,2,3,6,7,11,12,15,16,17,18,22],suppos:[7,15],suppress:2,sure:[3,11,12,22],surpris:2,sven:1,svg:[1,2,7,12],svgformatt:7,svh:12,svn:12,swallow:2,swg:12,swift:[1,2,12],swiftlex:12,swig:[1,2,12],swiglex:12,sybas:12,sylvestr:1,symbol:[2,12,18],synonym:11,syntact:[18,21],syntax:[2,3,7,11,17,18,21,22],syntaxerror:2,sys:[3,19],system:[3,7,10,11,20],systemverilog:[1,2,12],systemveriloglex:12,sysutil:12,tab:[2,5,7,12],tabl:[2,7],tabsiz:[0,5,12],tac:12,tad:[1,2,12],tads3:12,tads3lex:12,tag:[2,3,5,6,7,11,12,15,18],tagsfil:7,tagurlformat:7,tail:12,take:[0,2,5,7,11,12,15,20,21,22,23],taken:[0,2,12],tamil:[12,23],tango:[1,2],tanner:1,tap:[1,12],taplex:12,tarbal:[2,11],target:12,task:12,tasm:12,tasmlex:[2,12],tassilo:1,tcl:[1,2,23],tcllexer:12,tcsh:[1,2,12,23],tcshcon:12,tcshlexer:12,tcshsessionlex:12,tea:[1,2,12,23],team:[17,22],teatemplatelex:12,techniqu:11,ted:1,tell:[7,11],templat:[0,2,11,15],tenani:1,teng:[1,2],tera:[1,2,23],teraterm:[2,12],teratermlex:12,teratermmacro:12,term:[1,2,11,15,23],termcap:12,termcaplex:12,termin:[1,2,3,7,12,15,19],terminal16m:7,terminal256:[1,2,7],terminal256formatt:[7,17],terminalformatt:[2,3,7],terminaltruecolorformatt:7,terminfo:12,terminfolex:12,ternari:2,terraform:[1,2,12],terraformi:12,terraformlex:12,test:[1,2,3,15,18],testcas:7,testcaseformatt:7,tex:[7,12,23],texcom:7,texlex:12,text:[0,2,3,7,11,15,16,17,18,19,21,22],textedit:12,textfmt:12,textlex:12,than:[2,9,11,17],thank:[1,2,7,10],thei:[0,2,6,7,11,12,14,15,17,18],them:[0,2,5,7,8,11,12,15,18,21,22,23],theme:[2,17],themselv:11,theori:7,therefor:[3,7,12],thi:[0,2,3,5,6,7,10,11,12,13,14,15,16,17,18,19,20,22,23],thing:11,think:[0,19],third:11,thoma:1,thoroughli:21,those:[6,11,12,14,18],though:[12,21],three:2,thrift:[1,2,12],thriftlex:12,through:[3,11,15],thu:[7,17],thurgood:[1,12],thy:12,tiberiu:[1,2],ticket:[2,22],tiffin:1,tim:[1,2,22],time:[2,7,11],timhatch:1,timothi:1,titl:[0,7],tmp:11,tmpl:12,toc:12,todo:[2,5,12],todotxt:12,todotxtlex:12,togeth:[7,12,15],toggl:7,token:[0,2,3,4,5,6,7,8,12,15,17],token_typ:6,tokenmerg:5,tokenmergefilt:5,tokensourc:[0,6],tokenstr:7,tokentyp:[0,5,7,11],tolbert:1,toler:11,tom:1,toml:[1,2,12,23],tomllex:12,too:[2,3,11,12],tool:[2,12,21,22],top:[7,11,13],toplevel:11,topmost:11,total:[5,11],totaldownload:12,tpl:12,trac:[2,12,21,23],traceback:[1,2,12,18,23],tracker:[2,21,22],traffic:12,trafficscript:1,trail:[12,15],trailer:2,trait:12,transact:[1,12],transactsqllex:12,transcript:12,transfer:12,transform:12,translat:2,transpar:17,treat:[2,12,17],treat_stdlib_adts_as_builtin:12,tree:12,treetop:[1,2,12],treetoplex:12,trevor:1,tri:[2,3,11,19],trick:[8,15],tricki:11,trigger:7,troff:12,trove:2,trust:11,trute:1,tryzelaar:1,tspan:7,tsql:12,tst:12,tsx:12,ttl:12,ttype:[4,6],tupl:[0,6,7,11,12],turbo:12,turbopasc:12,turn:12,turtl:[1,2,12],turtlelex:12,tutori:10,twig:[2,12],twightmllex:12,twiglex:12,two:[6,7,11,12,15],twowaybind:12,txt:[2,7,12],type:[0,2,5,6,7,11,12,15,17,18],typescript:[1,2,12],typescriptlex:12,typeset:[7,12],typic:12
,typo3:12,typo3cm:12,typoscript:[1,2],typoscriptcssdata:12,typoscriptcssdatalex:12,typoscripthtmldata:12,typoscripthtmldatalex:12,typoscriptlex:12,typoscriptrefer:12,ucodelex:12,udalov:1,udiff:[2,12],udo:12,unabl:11,unbalanc:12,unchang:7,uncolor:4,uncolorfilt:4,under:[11,20,21],underlin:[6,7,12,17],underscor:2,understand:[7,14,17],undocu:12,unexpect:19,unfold:12,unfortun:11,unhandl:12,unicod:[2,5,7,8,11,12,15],unicodedecodeerror:19,unicodeerror:2,unicodelevel:12,unicon:2,uniconlex:12,unifi:[2,12],uniqu:[0,15],unistr:2,unit:[2,12],univers:[1,21],unix:12,unknown:12,unless:7,unlex:12,unlimit:[12,18],unmatch:12,unmodifi:11,unnecessari:11,unnecessarili:12,unpack:11,unquot:2,unsign:2,unstyl:18,until:11,untouch:12,unus:18,updat:[1,2,7,12],upper:[3,5],uppercas:[5,15,18],urbi:12,urbiscript:[1,2],urbiscriptlex:12,usabl:[0,6,17,21,22],usag:[0,3,21],use:[2,3,4,5,6,7,10,11,12,13,14,15,16,17,18,19,22],used:[0,2,3,4,5,6,7,11,12,13,14,15,17,18,21],useful:[2,5,7,12,18,21],usepackag:7,user:[0,7,9,12],uses:[0,2,4,7,9,10,11,15],usesyslog:12,using:[0,2,3,4,7,11,12,15,16,17,18,19,20,21],usr:15,usual:[7,15,20],utf8:2,utf:[2,7,10,12,19],util:[0,2,4,21],v4_0_0:10,vala:[1,2,12,23],valalex:12,valentin:1,valid:[0,12,21],vallentin:1,valu:[0,2,3,4,5,6,7,11,12,14,17,18],valueerror:2,van:1,vapi:12,vari:17,variabl:[2,7,10,11,12,18],variant:[12,17],varieti:17,variou:[0,2,8,23],vark:12,varnish:[1,2],varrazzo:1,varun:1,vba:12,vbnet:12,vbnetaspxlex:12,vbnetlex:12,vbs:12,vbscript:[1,2,12],vbscriptlex:12,vcl:12,vcllexer:12,vclsnippet:12,vclsnippetlex:12,vclsrc:12,vctreestatu:12,vctreestatuslex:12,veloc:[2,12],velocityhtmllex:12,velocitylex:12,velocityxmllex:12,vera:7,verbatim:[2,7],verbopt:7,verbosepkglist:12,veri:[0,4,11,12,22],verilog:[2,12,23],veriloglex:12,version:[0,3,4,5,7,10,11,12,15,17,18,19,21,23],versionad:12,vert:12,vfp:12,vgl:[1,2,12],vgllexer:12,vhd:12,vhdl:[1,2,12,23],vhdllexer:12,via:[7,9,12,19],view:[11,12],viewer:[7,21],viewvc:21,vim:[1,2,12,23],viml:12,vimlex:12,vimrc:12,vincent:1,vinot:1,virtualenv:2,visibl:[2,5],visiblewhitespacefilt:[2,5,18],vision:12,visit:15,visual:[1,2,23],vnd:12,voelker:1,volunt:22,vpr:12,wai:[2,7,11,17,18,19],wait:21,want:[3,5,7,10,11,12,13,14,15,17,18],wasn:11,watch:23,wavemetr:12,wdiff:[1,2,12],wdifflex:12,web:[11,21],webmisc:12,websit:2,weechat:[2,12],weechatlog:12,weight:[7,15],weizenbaum:1,welcom:[15,23],well:[2,6,7,12,21,23],were:[2,14],what:[2,3,15],wheel:2,when:[2,5,7,11,12,15,17,22],where:[0,6,7,11,13,15,17],whether:[0,2,7],whetsel:1,which:[0,2,3,5,7,10,11,12,14,15,17,18,19,20],whilei:[1,2],whileylex:12,white:17,whitespac:[0,2,5,7,11,12,15,17,18],whitnei:1,whole:[6,7,11],whose:[2,12,13,15],why:[4,21,23],wide:[12,21,22],width:7,wiki:[2,12,13,21,22,23],wikipedia:21,wildcard:3,william:1,willing:13,winbatch:12,winbug:12,window:[2,7,12,23],winkler:1,winner:2,winston:2,winter:[1,12],wish:21,within:[0,7,12],without:[0,2,7,11,12,14,18],wlua:12,wolfram:12,won:[3,4,7,11],word:[2,3,5,7,11,12,18],wordpress:21,work:[2,3,5,7,10,11,12,15,17,20],workaround:7,workspac:12,world:[3,10,15,17],would:[5,6,7,10,11,12,18],wouldn:7,wppygment:21,wpygment:21,wrap:[2,6,7,15],wrapcod:7,wrapper:[2,7,21],write:[0,2,5,7,8,12,14,15,18,19,21,23],written:[0,1,2,3,7,11,12,14,15,17,19,21],wrong:19,wsdl:12,wsf:12,wstokentyp:5,www:[10,12],wxhtmlpygment:21,wybir:1,x10:1,x10lexer:12,x1b:17,x1z:12,xbase:12,xchat:[2,12],xcode:2,xds:12,xhtml:[12,21],xmi:12,xml:[2,7,10,18,23],xmldjangolex:12,xmlerblex:12,xmllexer:12,xmln:10,xmlphplexer:12,xmlschema:10,xmlsmartylex:12,xoffset:7,xorglex:12,xpl:12,xql:12,xqm:12,xqueri:[1,2,1
2,23],xquerylex:12,xqy:12,xsd:[10,12],xsi:10,xsl:12,xslt:[1,2,12,23],xsltlexer:12,xten:12,xtend:[1,2,12],xtendlex:12,xtlang:12,xtlanglex:12,xtm:12,xul:12,xwiki:21,xxd:12,xxx:5,yai:2,yaml:[1,2,12,23],yamljinjalex:12,yamllex:12,yellow:17,yes:[0,11],yet:3,yield:[0,2,4,6,7,11,12],yml:12,yoffset:7,you:[0,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22],young:1,your:[3,5,7,8,9,12,13,14,16,17,18,23],your_formatt:3,your_lex:[3,11],your_lexer_fil:11,your_named_lex:11,yourapp:17,yourfilt:14,yourformatt:14,yourlex:14,yourmodul:[14,17],yourself:[11,21],yourstyl:[14,17],ystep:7,zamboni:1,zamudio:1,zeek:[2,12,23],zeeklex:12,zeitdilat:2,zep:12,zephir:[12,23],zephirlex:12,zero:12,zerodivisionerror:12,zig:[2,23],ziglang:12,ziglex:12,zimin:1,zimmerman:1,zimtstern:2,zip:12,zsh:12,zshrc:12,zurczak:1},titles:["The full Pygments API","Full contributor list","Pygments changelog","Command Line Interface","Write your own filter","Filters","Write your own formatter","Available formatters","Pygments documentation","Using Pygments in various scenarios","Use Pygments in Java","Write your own lexer","Available lexers","Using Pygments with MoinMoin","Register Plugins","Introduction and Quickstart","Using Pygments in ReST documents","Styles","Builtin Tokens","Unicode and Encodings","Download and installation","Pygments FAQ","Welcome!","Supported languages"],titleterms:{"0rc1":2,"6rc1":2,"class":[7,11],"new":[11,21],Adding:11,RTS:12,SAS:12,The:[0,11,14,22],Use:[10,14],Using:[4,9,11,13,16],actionscript:12,advanc:11,algebra:12,all:[12,23],ambienttalk:12,ampl:12,api:0,apl:12,archetyp:12,architectur:15,assembl:12,author:22,autom:12,avail:[7,12,17],bash:9,basic:12,bibliographi:12,bibtex:12,bnf:12,boa:12,bug:21,builtin:[5,17,18],busi:12,callback:11,can:21,cap:12,chang:11,changelog:2,chapel:12,clean:12,code:12,come:21,command:[3,12,15],comment:18,common:7,compact:12,complet:9,comput:12,config:12,configur:12,consol:12,contribut:22,contributor:1,core:14,creat:17,crystal:12,csound:12,css:12,custom:3,dalvik:12,data:12,decor:4,definit:[6,12],deleg:11,deriv:11,descriptor:12,develop:20,dialect:12,diff:12,document:[8,16],doe:21,domain:12,download:20,dsl:12,dump:12,dylan:12,ecl:12,eiffel:12,elm:12,embed:12,encod:[3,19],engin:12,entrypoint:14,erlang:12,esoter:12,exampl:15,extend:14,extendedregexlex:11,extens:12,ezhil:12,factor:12,famili:12,fantom:12,faq:21,featur:21,felix:12,fiction:12,file:12,filter:[3,4,5],flag:11,floscript:12,format:12,formatt:[0,3,6,7,15],formerli:12,forth:12,fortran:12,foxpro:12,framework:12,freefem:12,from:[11,21],full:[0,1],game:12,gener:[3,6,12,18],get:[3,17],googl:12,grammer:12,graph:12,graphic:12,guess:15,handl:11,hardwar:12,haskel:12,hax:12,help:3,hexadecim:12,high:0,how:[14,21],html:[6,12],icon:12,idl:12,igor:12,includ:12,inferno:12,instal:[12,20],interact:12,interfac:3,intermedi:12,internet:12,introduct:15,iter:12,ivl:12,java:10,javascript:12,julia:12,jvm:12,keyword:[11,18],known:12,languag:[9,12,21,23],level:0,lexer:[0,3,11,12,15],like:12,line:[3,15],lispi:12,list:[1,11,17],liter:18,lookup:15,macro:12,mail:12,make:21,makefil:12,man:12,markdown:9,markup:[12,23],matlab:12,microsoft:12,mime:12,misc:12,model:12,modifi:11,modula:12,moinmoin:13,mont:12,multi:12,multipl:11,multipurpos:12,mxml:12,name:[3,18,21],ncar:12,net:12,nim:12,nimrod:12,nit:12,nix:12,nixo:12,non:12,notat:12,note:3,oberon:12,object:12,onc:11,ooc:12,oper:[12,18],option:[0,3,7,15],orient:12,other:[9,12,23],output:12,over:12,own:[4,6,11,17],packag:[12,20],page:12,parasail:12,parser:12,pascal:12,patch:12,pawn:12,perl:12,php:12,plot:12,plugin:
14,poni:12,praat:12,pro:12,process:[0,12,21],program:[12,21,23],prolog:12,proto:12,prove:12,punctuat:18,pygment:[0,2,8,9,10,12,13,16,21],python:12,queri:12,quickstart:[6,15],qvt:12,raw:12,rdf:12,rebol:12,regex:11,regexlex:11,regist:14,relat:12,relax:12,report:21,requir:21,resourc:12,rest:16,riverb:12,roboconf:12,robot:12,rubi:12,rule:17,rust:12,scan:11,scdoc:12,scenario:9,schema:12,script:12,semant:12,session:12,sgf:12,shell:12,similar:12,simpl:12,slash:12,smalltalk:12,smart:12,smv:12,snobol:12,solid:12,sourc:[12,20],special:12,specif:12,sql:12,stata:12,state:11,stream:11,stuff:12,style:[3,6,12,17,21],stylesheet:12,subclass:[4,11],suggest:21,supercollid:12,support:[21,23],syntax:12,system:[12,21],tcl:12,templat:[12,23],tera:12,term:12,termin:17,test:[11,12],text:12,textmat:9,than:12,theorem:12,thi:21,token:[11,18],trafficscript:12,trick:11,typoscript:12,ucod:12,unicod:19,unicon:12,urbiscript:12,usag:15,use:21,uses:21,variou:[9,12],varnish:12,verif:12,version:[2,20],visual:12,want:21,web:12,welcom:22,what:21,where:21,whilei:12,who:21,wrapper:9,write:[4,6,11],x10:12,xml:12,xorg:12,your:[4,6,11],zig:12}}) \ No newline at end of file diff --git a/doc/_static/demo.css b/doc/_static/demo.css old mode 100644 new mode 100755 index 9344291..b168bde --- a/doc/_static/demo.css +++ b/doc/_static/demo.css @@ -1,38 +1,38 @@ -#try { - background-color: #f6f6f6; - border-radius: 0; - border: 1px solid #ccc; - margin-top: 15px; - padding: 10px 15px 5px 10px; - position: relative; -} - -#try h2 { - margin-top: 0; -} - -#try textarea { - border: 1px solid #999; - padding: 2px; - width: 100%; - min-height: 150px; -} - -#hlcode pre { - background-color: transparent; - border-radius: 0; -} - -#loading { - position: absolute; - top: 0; - left: 0; - width: 100%; - height: 100%; - margin: auto auto; - background-color: #cccccccc; - display: flex; - flex-direction: column; - justify-content: center; - text-align: center; -} +#try { + background-color: #f6f6f6; + border-radius: 0; + border: 1px solid #ccc; + margin-top: 15px; + padding: 10px 15px 5px 10px; + position: relative; +} + +#try h2 { + margin-top: 0; +} + +#try textarea { + border: 1px solid #999; + padding: 2px; + width: 100%; + min-height: 150px; +} + +#hlcode pre { + background-color: transparent; + border-radius: 0; +} + +#loading { + position: absolute; + top: 0; + left: 0; + width: 100%; + height: 100%; + margin: auto auto; + background-color: #cccccccc; + display: flex; + flex-direction: column; + justify-content: center; + text-align: center; +} diff --git a/doc/_static/demo.js b/doc/_static/demo.js old mode 100644 new mode 100755 index f538492..0968eed --- a/doc/_static/demo.js +++ b/doc/_static/demo.js @@ -1,100 +1,100 @@ -languagePluginLoader.then(() => { - // pyodide is now ready to use... 
-    pyodide.loadPackage('Pygments').then(() => {
-        pyodide.runPython('import pygments.lexers, pygments.formatters.html, pygments.styles');
-
-        var lexerlist = pyodide.runPython('list(pygments.lexers.get_all_lexers())');
-        var sel = document.getElementById("lang");
-        for (lex of lexerlist) {
-            var opt = document.createElement("option");
-            opt.text = lex[0];
-            opt.value = lex[1][0];
-            sel.add(opt);
-        }
-
-        var stylelist = pyodide.runPython('list(pygments.styles.get_all_styles())');
-        var sel = document.getElementById("style");
-        for (sty of stylelist) {
-            if (sty != "default") {
-                var opt = document.createElement("option");
-                opt.text = sty;
-                opt.value = sty;
-                sel.add(opt);
-            }
-        }
-
-        document.getElementById("hlbtn").disabled = false;
-        document.getElementById("loading").style.display = "none";
-    });
-});
-
-function new_file() {
-    pyodide.globals['fname'] = document.getElementById("file").files[0].name;
-    var alias = pyodide.runPython('pygments.lexers.find_lexer_class_for_filename(fname).aliases[0]');
-    var sel = document.getElementById("lang");
-    for (var i = 0; i < sel.length; i++) {
-        if (sel.options[i].value == alias) {
-            sel.selectedIndex = i;
-            reset_err_hl();
-            break;
-        }
-    }
-}
-
-function reset_err_hl() {
-    document.getElementById("aroundlang").style.backgroundColor = null;
-}
-
-function highlight() {
-    var select = document.getElementById("lang");
-    var alias = select.options.item(select.selectedIndex).value
-
-    if (alias == "") {
-        document.getElementById("aroundlang").style.backgroundColor = "#ffcccc";
-        return;
-    }
-    pyodide.globals['alias'] = alias;
-
-    var select = document.getElementById("style");
-    pyodide.globals['style'] = select.options.item(select.selectedIndex).value;
-
-    pyodide.runPython('lexer = pygments.lexers.get_lexer_by_name(alias)');
-    pyodide.runPython('fmter = pygments.formatters.html.HtmlFormatter(noclasses=True, style=style)');
-
-    var file = document.getElementById("file").files[0];
-    if (file) {
-        file.arrayBuffer().then(function(buf) {
-            pyodide.globals['code_mem'] = buf;
-            pyodide.runPython('code = bytes(code_mem)');
-            highlight_now();
-        });
-    } else {
-        pyodide.globals['code'] = document.getElementById("code").value;
-        highlight_now();
-    }
-}
-
-function highlight_now() {
-    var out = document.getElementById("hlcode");
-    out.innerHTML = pyodide.runPython('pygments.highlight(code, lexer, fmter)');
-    document.location.hash = "#try";
-    document.getElementById("hlcodedl").style.display = "block";
-}
-
-function download_code() {
-    var filename = "highlighted.html";
-    var hlcode = document.getElementById("hlcode").innerHTML;
-    var blob = new Blob([hlcode], {type: 'text/html'});
-    if (window.navigator.msSaveOrOpenBlob) {
-        window.navigator.msSaveBlob(blob, filename);
-    }
-    else{
-        var elem = window.document.createElement('a');
-        elem.href = window.URL.createObjectURL(blob);
-        elem.download = filename;
-        document.body.appendChild(elem);
-        elem.click();
-        document.body.removeChild(elem);
-        window.URL.revokeObjectURL(elem.href);
-    }
-}
+languagePluginLoader.then(() => {
+    // pyodide is now ready to use...
+    pyodide.loadPackage('Pygments').then(() => {
+        pyodide.runPython('import pygments.lexers, pygments.formatters.html, pygments.styles');
+
+        var lexerlist = pyodide.runPython('list(pygments.lexers.get_all_lexers())');
+        var sel = document.getElementById("lang");
+        for (lex of lexerlist) {
+            var opt = document.createElement("option");
+            opt.text = lex[0];
+            opt.value = lex[1][0];
+            sel.add(opt);
+        }
+
+        var stylelist = pyodide.runPython('list(pygments.styles.get_all_styles())');
+        var sel = document.getElementById("style");
+        for (sty of stylelist) {
+            if (sty != "default") {
+                var opt = document.createElement("option");
+                opt.text = sty;
+                opt.value = sty;
+                sel.add(opt);
+            }
+        }
+
+        document.getElementById("hlbtn").disabled = false;
+        document.getElementById("loading").style.display = "none";
+    });
+});
+
+function new_file() {
+    pyodide.globals['fname'] = document.getElementById("file").files[0].name;
+    var alias = pyodide.runPython('pygments.lexers.find_lexer_class_for_filename(fname).aliases[0]');
+    var sel = document.getElementById("lang");
+    for (var i = 0; i < sel.length; i++) {
+        if (sel.options[i].value == alias) {
+            sel.selectedIndex = i;
+            reset_err_hl();
+            break;
+        }
+    }
+}
+
+function reset_err_hl() {
+    document.getElementById("aroundlang").style.backgroundColor = null;
+}
+
+function highlight() {
+    var select = document.getElementById("lang");
+    var alias = select.options.item(select.selectedIndex).value
+
+    if (alias == "") {
+        document.getElementById("aroundlang").style.backgroundColor = "#ffcccc";
+        return;
+    }
+    pyodide.globals['alias'] = alias;
+
+    var select = document.getElementById("style");
+    pyodide.globals['style'] = select.options.item(select.selectedIndex).value;
+
+    pyodide.runPython('lexer = pygments.lexers.get_lexer_by_name(alias)');
+    pyodide.runPython('fmter = pygments.formatters.html.HtmlFormatter(noclasses=True, style=style)');
+
+    var file = document.getElementById("file").files[0];
+    if (file) {
+        file.arrayBuffer().then(function(buf) {
+            pyodide.globals['code_mem'] = buf;
+            pyodide.runPython('code = bytes(code_mem)');
+            highlight_now();
+        });
+    } else {
+        pyodide.globals['code'] = document.getElementById("code").value;
+        highlight_now();
+    }
+}
+
+function highlight_now() {
+    var out = document.getElementById("hlcode");
+    out.innerHTML = pyodide.runPython('pygments.highlight(code, lexer, fmter)');
+    document.location.hash = "#try";
+    document.getElementById("hlcodedl").style.display = "block";
+}
+
+function download_code() {
+    var filename = "highlighted.html";
+    var hlcode = document.getElementById("hlcode").innerHTML;
+    var blob = new Blob([hlcode], {type: 'text/html'});
+    if (window.navigator.msSaveOrOpenBlob) {
+        window.navigator.msSaveBlob(blob, filename);
+    }
+    else{
+        var elem = window.document.createElement('a');
+        elem.href = window.URL.createObjectURL(blob);
+        elem.download = filename;
+        document.body.appendChild(elem);
+        elem.click();
+        document.body.removeChild(elem);
+        window.URL.revokeObjectURL(elem.href);
+    }
+}
diff --git a/doc/_static/favicon.ico b/doc/_static/favicon.ico
old mode 100644
new mode 100755
diff --git a/doc/_static/github.png b/doc/_static/github.png
old mode 100644
new mode 100755
diff --git a/doc/_static/logo_new.png b/doc/_static/logo_new.png
old mode 100644
new mode 100755
diff --git a/doc/_static/logo_only.png b/doc/_static/logo_only.png
old mode 100644
new mode 100755
diff --git a/doc/_static/spinner.gif b/doc/_static/spinner.gif
old mode 100644
new mode 100755
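[Note: the Python API that demo.js drives through pyodide.runPython above is ordinary Pygments. A minimal standalone sketch of the same pipeline follows; the alias, style name, file name, and sample source are illustrative placeholders, not values taken from the demo.]

    # Sketch of the Pygments calls demo.js issues via Pyodide (illustrative values).
    from pygments import highlight
    from pygments.formatters.html import HtmlFormatter
    from pygments.lexers import find_lexer_class_for_filename, get_lexer_by_name

    code = 'print("Hello, world!")'           # stands in for the textarea contents
    lexer = get_lexer_by_name('python')       # highlight(): get_lexer_by_name(alias)
    # new_file() instead derives the alias from an uploaded file's name, e.g.:
    # lexer = find_lexer_class_for_filename('example.py')()
    fmter = HtmlFormatter(noclasses=True, style='default')  # inline styles, as in the demo
    html = highlight(code, lexer, fmter)      # highlight_now(): result injected into #hlcode
    print(html)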
diff --git a/doc/_templates/demo.html b/doc/_templates/demo.html
old mode 100644
new mode 100755
index bc788d1..fad1cb3
--- a/doc/_templates/demo.html
+++ b/doc/_templates/demo.html
@@ -1,53 +1,53 @@
-{% extends "layout.html" %}
-{% set sidebars = sidebars + ["demo_sidebar.html"] %}
-
-{% block extrahead %}
-{{ super() }}
-
-
-
-
-{% endblock %}
-
-{% block htmltitle %}Demo{{ titlesuffix }}{% endblock %}
-
-{% block body %}
-{{ body }}
-

-Demo - Try it out!
-The highlighting here is performed in-browser using a WebAssembly translation of Pygments, courtesy of Pyodide.
-Your content is neither sent over the web nor stored anywhere.
-Enter code and select a language
-  or enter code below:
-Loading Python...
-{% endblock %}
+{% extends "layout.html" %}
+{% set sidebars = sidebars + ["demo_sidebar.html"] %}
+
+{% block extrahead %}
+{{ super() }}
+
+
+
+
+{% endblock %}
+
+{% block htmltitle %}Demo{{ titlesuffix }}{% endblock %}
+
+{% block body %}
+{{ body }}
+

+Demo - Try it out!
+The highlighting here is performed in-browser using a WebAssembly translation of the latest Pygments master branch, courtesy of Pyodide.
+Your content is neither sent over the web nor stored anywhere.
+Enter code and select a language
+  or enter code below:
+Loading Python...
+{% endblock %}
diff --git a/doc/_templates/demo_sidebar.html b/doc/_templates/demo_sidebar.html
old mode 100644
new mode 100755
index 3f2a86c..2549816
--- a/doc/_templates/demo_sidebar.html
+++ b/doc/_templates/demo_sidebar.html
@@ -1 +1 @@
-Back to top
+Back to top
diff --git a/doc/_templates/docssidebar.html b/doc/_templates/docssidebar.html
old mode 100644
new mode 100755
index 913acaa..f404099
--- a/doc/_templates/docssidebar.html
+++ b/doc/_templates/docssidebar.html
@@ -1,3 +1,3 @@
-{% if pagename != 'docs/index' %}
-« Back to docs index
-{% endif %}
+{% if pagename != 'docs/index' %}
+« Back to docs index
+{% endif %}
diff --git a/doc/_templates/index_with_try.html b/doc/_templates/index_with_try.html
old mode 100644
new mode 100755
diff --git a/doc/_templates/indexsidebar.html b/doc/_templates/indexsidebar.html
old mode 100644
new mode 100755
index 5f7ecf9..a6cfaa3
--- a/doc/_templates/indexsidebar.html
+++ b/doc/_templates/indexsidebar.html
@@ -1,24 +1,24 @@

-Download
-{% if version.endswith('(hg)') %}
-This documentation is for version {{ version }}, which is not released yet.
-You can use it from the Git repo or look for released versions in the Python Package Index.
-{% else %}
-Current version: {{ version }}
-Get Pygments from the Python Package Index, or install it with:
-pip install Pygments
-{% endif %}
-
-Questions? Suggestions?
-
-Clone at GitHub.
-You can also open an issue at the tracker.

+Download
+{% if version.endswith('(hg)') %}
+This documentation is for version {{ version }}, which is not released yet.
+You can use it from the Git repo or look for released versions in the Python Package Index.
+{% else %}
+Current version: {{ version }}
+Get Pygments from the Python Package Index, or install it with:
+pip install Pygments
+{% endif %}
+
+Questions? Suggestions?
+
+Clone at GitHub.
+You can also open an issue at the tracker.
diff --git a/doc/_themes/pygments14/layout.html b/doc/_themes/pygments14/layout.html
old mode 100644
new mode 100755
index 3e04665..70742ce
--- a/doc/_themes/pygments14/layout.html
+++ b/doc/_themes/pygments14/layout.html
@@ -1,98 +1,98 @@
-{#
-    sphinxdoc/layout.html
-    ~~~~~~~~~~~~~~~~~~~~~
-
-    Sphinx layout template for the sphinxdoc theme.
-
-    :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
-    :license: BSD, see LICENSE for details.
-#}
-{%- extends "basic/layout.html" %}
-
-{# put the sidebar before the body #}
-{% block sidebar1 %}{{ sidebar() }}{% endblock %}
-{% block sidebar2 %}{% endblock %}
-
-{% block relbar1 %}{% endblock %}
-{% block relbar2 %}{% endblock %}
-
-{% block extrahead %}
-
-{{ super() }}
-{%- if not embedded %}
-
-
-{%- endif %}
-{% endblock %}
-
-{% block header %}
-
-
-{% endblock %}
-
-{% block footer %}
-
-
-{# closes "outerwrapper" div #}
-{% endblock %}
-
-{% block sidebarrel %}
-{% endblock %}
-
-{% block sidebarsourcelink %}
-{% endblock %}
+{#
+    sphinxdoc/layout.html
+    ~~~~~~~~~~~~~~~~~~~~~
+
+    Sphinx layout template for the sphinxdoc theme.
+
+    :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+#}
+{%- extends "basic/layout.html" %}
+
+{# put the sidebar before the body #}
+{% block sidebar1 %}{{ sidebar() }}{% endblock %}
+{% block sidebar2 %}{% endblock %}
+
+{% block relbar1 %}{% endblock %}
+{% block relbar2 %}{% endblock %}
+
+{% block extrahead %}
+
+{{ super() }}
+{%- if not embedded %}
+
+
+{%- endif %}
+{% endblock %}
+
+{% block header %}
+
+
+{% endblock %}
+
+{% block footer %}
+
+
{# closes "outerwrapper" div #} +{% endblock %} + +{% block sidebarrel %} +{% endblock %} + +{% block sidebarsourcelink %} +{% endblock %} diff --git a/doc/_themes/pygments14/static/bodybg.png b/doc/_themes/pygments14/static/bodybg.png old mode 100644 new mode 100755 diff --git a/doc/_themes/pygments14/static/docbg.png b/doc/_themes/pygments14/static/docbg.png old mode 100644 new mode 100755 diff --git a/doc/_themes/pygments14/static/listitem.png b/doc/_themes/pygments14/static/listitem.png old mode 100644 new mode 100755 diff --git a/doc/_themes/pygments14/static/logo.png b/doc/_themes/pygments14/static/logo.png old mode 100644 new mode 100755 diff --git a/doc/_themes/pygments14/static/pocoo.png b/doc/_themes/pygments14/static/pocoo.png old mode 100644 new mode 100755 diff --git a/doc/_themes/pygments14/static/pygments14.css_t b/doc/_themes/pygments14/static/pygments14.css_t old mode 100644 new mode 100755 index 72ca942..359313a --- a/doc/_themes/pygments14/static/pygments14.css_t +++ b/doc/_themes/pygments14/static/pygments14.css_t @@ -1,401 +1,401 @@ -/* - * pygments14.css - * ~~~~~~~~~~~~~~ - * - * Sphinx stylesheet -- pygments14 theme. Heavily copied from sphinx13. - * - * :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. - * :license: BSD, see LICENSE for details. - * - */ - -@import url("basic.css"); - -/* -- page layout ----------------------------------------------------------- */ - -body { - font-family: {{ theme_font }}, 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', - 'Verdana', sans-serif; - font-size: 14px; - text-align: center; - background-image: url(bodybg.png); - background-color: {{ theme_background }}; - color: black; - padding: 0; - /* - border-right: 1px solid {{ theme_border }}; - border-left: 1px solid {{ theme_border }}; - */ - - margin: 0 auto; - min-width: 780px; - max-width: 1080px; -} - -.outerwrapper { - background-image: url(docbg.png); - background-attachment: fixed; -} - -.pageheader { - text-align: left; - padding: 10px 15px; -} - -.pageheader ul { - float: right; - color: white; - list-style-type: none; - padding-left: 0; - margin-top: 40px; - margin-right: 10px; -} - -.pageheader li { - float: left; - margin: 0 0 0 10px; -} - -.pageheader li a { - border-radius: 3px; - padding: 8px 12px; - color: {{ theme_darkgray }}; - text-shadow: 0 0 5px rgba(0, 0, 0, 0.2); -} - -.pageheader li a:hover { - background-color: {{ theme_yellow }}; - color: black; - text-shadow: none; -} - -div.document { - text-align: left; - /*border-left: 1em solid {{ theme_lightyellow }};*/ -} - -div.bodywrapper { - margin: 0 12px 0 240px; - background-color: white; -/* border-right: 1px solid {{ theme_border }}; */ -} - -div.body { - margin: 0; - padding: 0.5em 20px 20px 20px; -} - -div.related { - font-size: 1em; - color: {{ theme_darkgray }}; -} - -div.related ul { - background-image: url(relbg.png); - background-repeat: repeat-y; - background-color: {{ theme_yellow }}; - height: 1.9em; - /* - border-top: 1px solid {{ theme_border }}; - border-bottom: 1px solid {{ theme_border }}; - */ -} - -div.related ul li { - margin: 0 5px 0 0; - padding: 0; - float: left; -} - -div.related ul li.right { - float: right; - margin-right: 5px; -} - -div.related ul li a { - margin: 0; - padding: 0 5px 0 5px; - line-height: 1.75em; - color: {{ theme_darkgray }}; - /*text-shadow: 0px 0px 1px rgba(0, 0, 0, 0.5);*/ -} - -div.related ul li a:hover { - text-decoration: underline; - text-shadow: 0px 0px 1px rgba(255, 255, 255, 0.5); -} - -div.sphinxsidebarwrapper { - position: relative; 
- top: 0px; - padding: 0; -} - -div.sphinxsidebar { - margin: 0; - padding: 0 0px 15px 15px; - width: 210px; - float: left; - font-size: 1em; - text-align: left; -} - -div.sphinxsidebar .logo { - font-size: 1.8em; - color: #666; - font-weight: 300; - text-align: center; -} - -div.sphinxsidebar .logo img { - vertical-align: middle; -} - -div.sphinxsidebar input { - border: 1px solid #aaa; - font-family: {{ theme_font }}, 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', - 'Verdana', sans-serif; - font-size: 1em; -} - -div.sphinxsidebar h3 { - font-size: 1.5em; - /* border-top: 1px solid {{ theme_border }}; */ - margin-top: 1em; - margin-bottom: 0.5em; - padding-top: 0.5em; -} - -div.sphinxsidebar h4 { - font-size: 1.2em; - margin-bottom: 0; -} - -div.sphinxsidebar h3, div.sphinxsidebar h4 { - margin-right: -15px; - margin-left: -15px; - padding-right: 14px; - padding-left: 14px; - color: #333; - font-weight: 300; - /*text-shadow: 0px 0px 0.5px rgba(0, 0, 0, 0.4);*/ -} - -div.sphinxsidebarwrapper > h3:first-child { - margin-top: 0.5em; - border: none; -} - -div.sphinxsidebar h3 a { - color: #333; -} - -div.sphinxsidebar ul { - color: #444; - margin-top: 7px; - padding: 0; - line-height: 130%; -} - -div.sphinxsidebar ul ul { - margin-left: 20px; - list-style-image: url(listitem.png); -} - -div.footer { - color: {{ theme_darkgray }}; - text-shadow: 0 0 .2px rgba(255, 255, 255, 0.8); - padding: 2em; - text-align: center; - clear: both; - font-size: 0.8em; -} - -/* -- body styles ----------------------------------------------------------- */ - -p { - margin: 0.8em 0 0.5em 0; -} - -a { - color: {{ theme_darkgreen }}; - text-decoration: none; -} - -a:hover { - color: {{ theme_darkyellow }}; -} - -div.body a { - text-decoration: underline; -} - -h1 { - margin: 10px 0 0 0; - font-size: 2.4em; - color: {{ theme_darkgray }}; - font-weight: 300; -} - -h2 { - margin: 1.em 0 0.2em 0; - font-size: 1.5em; - font-weight: 300; - padding: 0; - color: {{ theme_darkgreen }}; -} - -h3 { - margin: 1em 0 -0.3em 0; - font-size: 1.3em; - font-weight: 300; -} - -div.body h1 a, div.body h2 a, div.body h3 a, div.body h4 a, div.body h5 a, div.body h6 a { - text-decoration: none; -} - -div.body h1 a tt, div.body h2 a tt, div.body h3 a tt, div.body h4 a tt, div.body h5 a tt, div.body h6 a tt { - color: {{ theme_darkgreen }} !important; - font-size: inherit !important; -} - -a.headerlink { - color: {{ theme_green }} !important; - font-size: 12px; - margin-left: 6px; - padding: 0 4px 0 4px; - text-decoration: none !important; - float: right; -} - -a.headerlink:hover { - background-color: #ccc; - color: white!important; -} - -cite, code, tt { - font-family: 'Consolas', 'DejaVu Sans Mono', - 'Bitstream Vera Sans Mono', monospace; - font-size: 14px; - letter-spacing: -0.02em; -} - -tt { - background-color: #f2f2f2; - border: 1px solid #ddd; - border-radius: 2px; - color: #333; - padding: 1px; -} - -tt.descname, tt.descclassname, tt.xref { - border: 0; -} - -hr { - border: 1px solid #abc; - margin: 2em; -} - -a tt { - border: 0; - color: {{ theme_darkgreen }}; -} - -a tt:hover { - color: {{ theme_darkyellow }}; -} - -pre { - font-family: 'Consolas', 'DejaVu Sans Mono', - 'Bitstream Vera Sans Mono', monospace; - font-size: 13px; - letter-spacing: 0.015em; - line-height: 120%; - padding: 0.5em; - border: 1px solid #ccc; - border-radius: 2px; - background-color: #f8f8f8; -} - -pre a { - color: inherit; - text-decoration: underline; -} - -td.linenos pre { - padding: 0.5em 0; -} - -div.quotebar { - background-color: #f8f8f8; - 
max-width: 250px; - float: right; - padding: 0px 7px; - border: 1px solid #ccc; - margin-left: 1em; -} - -div.topic { - background-color: #f8f8f8; -} - -table { - border-collapse: collapse; - margin: 0 -0.5em 0 -0.5em; -} - -table td, table th { - padding: 0.2em 0.5em 0.2em 0.5em; -} - -div.admonition, div.warning { - font-size: 0.9em; - margin: 1em 0 1em 0; - border: 1px solid #86989B; - border-radius: 2px; - background-color: #f7f7f7; - padding: 0; -} - -div.admonition p, div.warning p { - margin: 0.5em 1em 0.5em 1em; - padding: 0; -} - -div.admonition pre, div.warning pre { - margin: 0.4em 1em 0.4em 1em; -} - -div.admonition p.admonition-title, -div.warning p.admonition-title { - margin-top: 1em; - padding-top: 0.5em; - font-weight: bold; -} - -div.warning { - border: 1px solid #940000; -/* background-color: #FFCCCF;*/ -} - -div.warning p.admonition-title { -} - -div.admonition ul, div.admonition ol, -div.warning ul, div.warning ol { - margin: 0.1em 0.5em 0.5em 3em; - padding: 0; -} - -.viewcode-back { - font-family: {{ theme_font }}, 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', - 'Verdana', sans-serif; -} - -div.viewcode-block:target { - background-color: #f4debf; - border-top: 1px solid #ac9; - border-bottom: 1px solid #ac9; -} +/* + * pygments14.css + * ~~~~~~~~~~~~~~ + * + * Sphinx stylesheet -- pygments14 theme. Heavily copied from sphinx13. + * + * :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +@import url("basic.css"); + +/* -- page layout ----------------------------------------------------------- */ + +body { + font-family: {{ theme_font }}, 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', + 'Verdana', sans-serif; + font-size: 14px; + text-align: center; + background-image: url(bodybg.png); + background-color: {{ theme_background }}; + color: black; + padding: 0; + /* + border-right: 1px solid {{ theme_border }}; + border-left: 1px solid {{ theme_border }}; + */ + + margin: 0 auto; + min-width: 780px; + max-width: 1080px; +} + +.outerwrapper { + background-image: url(docbg.png); + background-attachment: fixed; +} + +.pageheader { + text-align: left; + padding: 10px 15px; +} + +.pageheader ul { + float: right; + color: white; + list-style-type: none; + padding-left: 0; + margin-top: 40px; + margin-right: 10px; +} + +.pageheader li { + float: left; + margin: 0 0 0 10px; +} + +.pageheader li a { + border-radius: 3px; + padding: 8px 12px; + color: {{ theme_darkgray }}; + text-shadow: 0 0 5px rgba(0, 0, 0, 0.2); +} + +.pageheader li a:hover { + background-color: {{ theme_yellow }}; + color: black; + text-shadow: none; +} + +div.document { + text-align: left; + /*border-left: 1em solid {{ theme_lightyellow }};*/ +} + +div.bodywrapper { + margin: 0 12px 0 240px; + background-color: white; +/* border-right: 1px solid {{ theme_border }}; */ +} + +div.body { + margin: 0; + padding: 0.5em 20px 20px 20px; +} + +div.related { + font-size: 1em; + color: {{ theme_darkgray }}; +} + +div.related ul { + background-image: url(relbg.png); + background-repeat: repeat-y; + background-color: {{ theme_yellow }}; + height: 1.9em; + /* + border-top: 1px solid {{ theme_border }}; + border-bottom: 1px solid {{ theme_border }}; + */ +} + +div.related ul li { + margin: 0 5px 0 0; + padding: 0; + float: left; +} + +div.related ul li.right { + float: right; + margin-right: 5px; +} + +div.related ul li a { + margin: 0; + padding: 0 5px 0 5px; + line-height: 1.75em; + color: {{ theme_darkgray }}; + /*text-shadow: 0px 0px 1px 
rgba(0, 0, 0, 0.5);*/ +} + +div.related ul li a:hover { + text-decoration: underline; + text-shadow: 0px 0px 1px rgba(255, 255, 255, 0.5); +} + +div.sphinxsidebarwrapper { + position: relative; + top: 0px; + padding: 0; +} + +div.sphinxsidebar { + margin: 0; + padding: 0 0px 15px 15px; + width: 210px; + float: left; + font-size: 1em; + text-align: left; +} + +div.sphinxsidebar .logo { + font-size: 1.8em; + color: #666; + font-weight: 300; + text-align: center; +} + +div.sphinxsidebar .logo img { + vertical-align: middle; +} + +div.sphinxsidebar input { + border: 1px solid #aaa; + font-family: {{ theme_font }}, 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', + 'Verdana', sans-serif; + font-size: 1em; +} + +div.sphinxsidebar h3 { + font-size: 1.5em; + /* border-top: 1px solid {{ theme_border }}; */ + margin-top: 1em; + margin-bottom: 0.5em; + padding-top: 0.5em; +} + +div.sphinxsidebar h4 { + font-size: 1.2em; + margin-bottom: 0; +} + +div.sphinxsidebar h3, div.sphinxsidebar h4 { + margin-right: -15px; + margin-left: -15px; + padding-right: 14px; + padding-left: 14px; + color: #333; + font-weight: 300; + /*text-shadow: 0px 0px 0.5px rgba(0, 0, 0, 0.4);*/ +} + +div.sphinxsidebarwrapper > h3:first-child { + margin-top: 0.5em; + border: none; +} + +div.sphinxsidebar h3 a { + color: #333; +} + +div.sphinxsidebar ul { + color: #444; + margin-top: 7px; + padding: 0; + line-height: 130%; +} + +div.sphinxsidebar ul ul { + margin-left: 20px; + list-style-image: url(listitem.png); +} + +div.footer { + color: {{ theme_darkgray }}; + text-shadow: 0 0 .2px rgba(255, 255, 255, 0.8); + padding: 2em; + text-align: center; + clear: both; + font-size: 0.8em; +} + +/* -- body styles ----------------------------------------------------------- */ + +p { + margin: 0.8em 0 0.5em 0; +} + +a { + color: {{ theme_darkgreen }}; + text-decoration: none; +} + +a:hover { + color: {{ theme_darkyellow }}; +} + +div.body a { + text-decoration: underline; +} + +h1 { + margin: 10px 0 0 0; + font-size: 2.4em; + color: {{ theme_darkgray }}; + font-weight: 300; +} + +h2 { + margin: 1.em 0 0.2em 0; + font-size: 1.5em; + font-weight: 300; + padding: 0; + color: {{ theme_darkgreen }}; +} + +h3 { + margin: 1em 0 -0.3em 0; + font-size: 1.3em; + font-weight: 300; +} + +div.body h1 a, div.body h2 a, div.body h3 a, div.body h4 a, div.body h5 a, div.body h6 a { + text-decoration: none; +} + +div.body h1 a tt, div.body h2 a tt, div.body h3 a tt, div.body h4 a tt, div.body h5 a tt, div.body h6 a tt { + color: {{ theme_darkgreen }} !important; + font-size: inherit !important; +} + +a.headerlink { + color: {{ theme_green }} !important; + font-size: 12px; + margin-left: 6px; + padding: 0 4px 0 4px; + text-decoration: none !important; + float: right; +} + +a.headerlink:hover { + background-color: #ccc; + color: white!important; +} + +cite, code, tt { + font-family: 'Consolas', 'DejaVu Sans Mono', + 'Bitstream Vera Sans Mono', monospace; + font-size: 14px; + letter-spacing: -0.02em; +} + +tt { + background-color: #f2f2f2; + border: 1px solid #ddd; + border-radius: 2px; + color: #333; + padding: 1px; +} + +tt.descname, tt.descclassname, tt.xref { + border: 0; +} + +hr { + border: 1px solid #abc; + margin: 2em; +} + +a tt { + border: 0; + color: {{ theme_darkgreen }}; +} + +a tt:hover { + color: {{ theme_darkyellow }}; +} + +pre { + font-family: 'Consolas', 'DejaVu Sans Mono', + 'Bitstream Vera Sans Mono', monospace; + font-size: 13px; + letter-spacing: 0.015em; + line-height: 120%; + padding: 0.5em; + border: 1px solid #ccc; + border-radius: 
2px; + background-color: #f8f8f8; +} + +pre a { + color: inherit; + text-decoration: underline; +} + +td.linenos pre { + padding: 0.5em 0; +} + +div.quotebar { + background-color: #f8f8f8; + max-width: 250px; + float: right; + padding: 0px 7px; + border: 1px solid #ccc; + margin-left: 1em; +} + +div.topic { + background-color: #f8f8f8; +} + +table { + border-collapse: collapse; + margin: 0 -0.5em 0 -0.5em; +} + +table td, table th { + padding: 0.2em 0.5em 0.2em 0.5em; +} + +div.admonition, div.warning { + font-size: 0.9em; + margin: 1em 0 1em 0; + border: 1px solid #86989B; + border-radius: 2px; + background-color: #f7f7f7; + padding: 0; +} + +div.admonition p, div.warning p { + margin: 0.5em 1em 0.5em 1em; + padding: 0; +} + +div.admonition pre, div.warning pre { + margin: 0.4em 1em 0.4em 1em; +} + +div.admonition p.admonition-title, +div.warning p.admonition-title { + margin-top: 1em; + padding-top: 0.5em; + font-weight: bold; +} + +div.warning { + border: 1px solid #940000; +/* background-color: #FFCCCF;*/ +} + +div.warning p.admonition-title { +} + +div.admonition ul, div.admonition ol, +div.warning ul, div.warning ol { + margin: 0.1em 0.5em 0.5em 3em; + padding: 0; +} + +.viewcode-back { + font-family: {{ theme_font }}, 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', + 'Verdana', sans-serif; +} + +div.viewcode-block:target { + background-color: #f4debf; + border-top: 1px solid #ac9; + border-bottom: 1px solid #ac9; +} diff --git a/doc/_themes/pygments14/theme.conf b/doc/_themes/pygments14/theme.conf old mode 100644 new mode 100755 index fffe66d..50b0573 --- a/doc/_themes/pygments14/theme.conf +++ b/doc/_themes/pygments14/theme.conf @@ -1,15 +1,15 @@ -[theme] -inherit = basic -stylesheet = pygments14.css -pygments_style = friendly - -[options] -green = #66b55e -darkgreen = #36852e -darkgray = #666666 -border = #66b55e -yellow = #f4cd00 -darkyellow = #d4ad00 -lightyellow = #fffbe3 -background = #f9f9f9 -font = PT Sans +[theme] +inherit = basic +stylesheet = pygments14.css +pygments_style = friendly + +[options] +green = #66b55e +darkgreen = #36852e +darkgray = #666666 +border = #66b55e +yellow = #f4cd00 +darkyellow = #d4ad00 +lightyellow = #fffbe3 +background = #f9f9f9 +font = PT Sans diff --git a/doc/conf.py b/doc/conf.py old mode 100644 new mode 100755 index 3ab5c2e..be61c64 --- a/doc/conf.py +++ b/doc/conf.py @@ -1,230 +1,230 @@ -# -*- coding: utf-8 -*- -# -# Pygments documentation build configuration file -# - -import sys, os - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath('..')) - -import pygments - -# -- General configuration ----------------------------------------------------- - -# If your documentation needs a minimal Sphinx version, state it here. -#needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be extensions -# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'pygments.sphinxext'] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -#source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. 
-project = u'Pygments' -copyright = u'2006-2019, Georg Brandl and Pygments contributors' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = pygments.__version__ -# The full version, including alpha/beta/rc tags. -release = version - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -#language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -#today = '' -# Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ['_build'] - -# The reST default role (used for this markup: `text`) to use for all documents. -#default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -#add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -#show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -#pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] - - -# -- Options for HTML output --------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'pygments14' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -#html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -html_theme_path = ['_themes'] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -html_title = 'Pygments' - -# A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -#html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -html_favicon = '_static/favicon.ico' - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -#html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -html_sidebars = {'index': ['indexsidebar.html', 'searchbox.html']} - -# Additional templates that should be rendered to pages, maps page names to -# template names. 
-if os.environ.get('WEBSITE_BUILD'): - html_additional_pages = { - 'demo': 'demo.html', - } - -# If false, no module index is generated. -#html_domain_indices = True - -# If false, no index is generated. -#html_use_index = True - -# If true, the index is split into individual pages for each letter. -#html_split_index = False - -# If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -#html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -#html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -#html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = 'Pygments' - - -# -- Options for LaTeX output -------------------------------------------------- - -latex_elements = { -# The paper size ('letterpaper' or 'a4paper'). -#'papersize': 'letterpaper', - -# The font size ('10pt', '11pt' or '12pt'). -#'pointsize': '10pt', - -# Additional stuff for the LaTeX preamble. -#'preamble': '', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass [howto/manual]). -latex_documents = [ - ('docs/index', 'Pygments.tex', u'Pygments Documentation', - u'Pygments authors', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -#latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -#latex_use_parts = False - -# If true, show page references after internal links. -#latex_show_pagerefs = False - -# If true, show URL addresses after external links. -#latex_show_urls = False - -# Documents to append as an appendix to all manuals. -#latex_appendices = [] - -# If false, no module index is generated. -#latex_domain_indices = True - - -# -- Options for manual page output -------------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ('docs/index', 'pygments', u'Pygments Documentation', - [u'Pygments authors'], 1) -] - -# If true, show URL addresses after external links. -#man_show_urls = False - - -# Example configuration for intersphinx: refer to the Python standard library. -#intersphinx_mapping = {'http://docs.python.org/': None} - - -def pg_context(app, pagename, templatename, ctx, event_arg): - ctx['demo_active'] = bool(os.environ.get('WEBSITE_BUILD')) - - -def setup(app): - app.connect('html-page-context', pg_context) +# -*- coding: utf-8 -*- +# +# Pygments documentation build configuration file +# + +import sys, os + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +sys.path.insert(0, os.path.abspath('..')) + +import pygments + +# -- General configuration ----------------------------------------------------- + +# If your documentation needs a minimal Sphinx version, state it here. 
+#needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be extensions +# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'pygments.sphinxext'] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +#source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = 'Pygments' +copyright = '2006-2020, Georg Brandl and Pygments contributors' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = pygments.__version__ +# The full version, including alpha/beta/rc tags. +release = version + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +#language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = ['_build'] + +# The reST default role (used for this markup: `text`) to use for all documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +#show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +#pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +#modindex_common_prefix = [] + + +# -- Options for HTML output --------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = 'pygments14' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +html_theme_path = ['_themes'] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +html_title = 'Pygments' + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +#html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +html_favicon = '_static/favicon.ico' + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". 
+html_static_path = ['_static'] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +#html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +html_sidebars = {'index': ['indexsidebar.html', 'searchbox.html']} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +if os.environ.get('WEBSITE_BUILD'): + html_additional_pages = { + 'demo': 'demo.html', + } + +# If false, no module index is generated. +#html_domain_indices = True + +# If false, no index is generated. +#html_use_index = True + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. +#html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +#html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +#html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = None + +# Output file base name for HTML help builder. +htmlhelp_basename = 'Pygments' + + +# -- Options for LaTeX output -------------------------------------------------- + +latex_elements = { +# The paper size ('letterpaper' or 'a4paper'). +#'papersize': 'letterpaper', + +# The font size ('10pt', '11pt' or '12pt'). +#'pointsize': '10pt', + +# Additional stuff for the LaTeX preamble. +#'preamble': '', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, documentclass [howto/manual]). +latex_documents = [ + ('docs/index', 'Pygments.tex', 'Pygments Documentation', + 'Pygments authors', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# If true, show page references after internal links. +#latex_show_pagerefs = False + +# If true, show URL addresses after external links. +#latex_show_urls = False + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. +#latex_domain_indices = True + + +# -- Options for manual page output -------------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ('docs/index', 'pygments', 'Pygments Documentation', + ['Pygments authors'], 1) +] + +# If true, show URL addresses after external links. +#man_show_urls = False + + +# Example configuration for intersphinx: refer to the Python standard library. 
+#intersphinx_mapping = {'http://docs.python.org/': None} + + +def pg_context(app, pagename, templatename, ctx, event_arg): + ctx['demo_active'] = bool(os.environ.get('WEBSITE_BUILD')) + + +def setup(app): + app.connect('html-page-context', pg_context) diff --git a/doc/docs/api.rst b/doc/docs/api.rst old mode 100644 new mode 100755 index a6b242d..05a1dac --- a/doc/docs/api.rst +++ b/doc/docs/api.rst @@ -1,354 +1,354 @@ -.. -*- mode: rst -*- - -===================== -The full Pygments API -===================== - -This page describes the Pygments API. - -High-level API -============== - -.. module:: pygments - -Functions from the :mod:`pygments` module: - -.. function:: lex(code, lexer) - - Lex `code` with the `lexer` (must be a `Lexer` instance) - and return an iterable of tokens. Currently, this only calls - `lexer.get_tokens()`. - -.. function:: format(tokens, formatter, outfile=None) - - Format a token stream (iterable of tokens) `tokens` with the - `formatter` (must be a `Formatter` instance). The result is - written to `outfile`, or if that is ``None``, returned as a - string. - -.. function:: highlight(code, lexer, formatter, outfile=None) - - This is the most high-level highlighting function. - It combines `lex` and `format` in one function. - - -.. module:: pygments.lexers - -Functions from :mod:`pygments.lexers`: - -.. function:: get_lexer_by_name(alias, **options) - - Return an instance of a `Lexer` subclass that has `alias` in its - aliases list. The lexer is given the `options` at its - instantiation. - - Will raise :exc:`pygments.util.ClassNotFound` if no lexer with that alias is - found. - -.. function:: get_lexer_for_filename(fn, **options) - - Return a `Lexer` subclass instance that has a filename pattern - matching `fn`. The lexer is given the `options` at its - instantiation. - - Will raise :exc:`pygments.util.ClassNotFound` if no lexer for that filename - is found. - -.. function:: get_lexer_for_mimetype(mime, **options) - - Return a `Lexer` subclass instance that has `mime` in its mimetype - list. The lexer is given the `options` at its instantiation. - - Will raise :exc:`pygments.util.ClassNotFound` if not lexer for that mimetype - is found. - -.. function:: load_lexer_from_file(filename, lexername="CustomLexer", **options) - - Return a `Lexer` subclass instance loaded from the provided file, relative - to the current directory. The file is expected to contain a Lexer class - named `lexername` (by default, CustomLexer). Users should be very careful with - the input, because this method is equivalent to running eval on the input file. - The lexer is given the `options` at its instantiation. - - :exc:`ClassNotFound` is raised if there are any errors loading the Lexer - - .. versionadded:: 2.2 - -.. function:: guess_lexer(text, **options) - - Return a `Lexer` subclass instance that's guessed from the text in - `text`. For that, the :meth:`.analyse_text()` method of every known lexer - class is called with the text as argument, and the lexer which returned the - highest value will be instantiated and returned. - - :exc:`pygments.util.ClassNotFound` is raised if no lexer thinks it can - handle the content. - -.. function:: guess_lexer_for_filename(filename, text, **options) - - As :func:`guess_lexer()`, but only lexers which have a pattern in `filenames` - or `alias_filenames` that matches `filename` are taken into consideration. - - :exc:`pygments.util.ClassNotFound` is raised if no lexer thinks it can - handle the content. - -.. 
function:: get_all_lexers() - - Return an iterable over all registered lexers, yielding tuples in the - format:: - - (longname, tuple of aliases, tuple of filename patterns, tuple of mimetypes) - - .. versionadded:: 0.6 - -.. function:: find_lexer_class_by_name(alias) - - Return the `Lexer` subclass that has `alias` in its aliases list, without - instantiating it. - - Will raise :exc:`pygments.util.ClassNotFound` if no lexer with that alias is - found. - - .. versionadded:: 2.2 - -.. function:: find_lexer_class(name) - - Return the `Lexer` subclass that with the *name* attribute as given by - the *name* argument. - - -.. module:: pygments.formatters - -Functions from :mod:`pygments.formatters`: - -.. function:: get_formatter_by_name(alias, **options) - - Return an instance of a :class:`.Formatter` subclass that has `alias` in its - aliases list. The formatter is given the `options` at its instantiation. - - Will raise :exc:`pygments.util.ClassNotFound` if no formatter with that - alias is found. - -.. function:: get_formatter_for_filename(fn, **options) - - Return a :class:`.Formatter` subclass instance that has a filename pattern - matching `fn`. The formatter is given the `options` at its instantiation. - - Will raise :exc:`pygments.util.ClassNotFound` if no formatter for that filename - is found. - -.. function:: load_formatter_from_file(filename, formattername="CustomFormatter", **options) - - Return a `Formatter` subclass instance loaded from the provided file, relative - to the current directory. The file is expected to contain a Formatter class - named ``formattername`` (by default, CustomFormatter). Users should be very - careful with the input, because this method is equivalent to running eval - on the input file. The formatter is given the `options` at its instantiation. - - :exc:`ClassNotFound` is raised if there are any errors loading the Formatter - - .. versionadded:: 2.2 - -.. module:: pygments.styles - -Functions from :mod:`pygments.styles`: - -.. function:: get_style_by_name(name) - - Return a style class by its short name. The names of the builtin styles - are listed in :data:`pygments.styles.STYLE_MAP`. - - Will raise :exc:`pygments.util.ClassNotFound` if no style of that name is - found. - -.. function:: get_all_styles() - - Return an iterable over all registered styles, yielding their names. - - .. versionadded:: 0.6 - - -.. module:: pygments.lexer - -Lexers -====== - -The base lexer class from which all lexers are derived is: - -.. class:: Lexer(**options) - - The constructor takes a \*\*keywords dictionary of options. - Every subclass must first process its own options and then call - the `Lexer` constructor, since it processes the `stripnl`, - `stripall` and `tabsize` options. - - An example looks like this: - - .. sourcecode:: python - - def __init__(self, **options): - self.compress = options.get('compress', '') - Lexer.__init__(self, **options) - - As these options must all be specifiable as strings (due to the - command line usage), there are various utility functions - available to help with that, see `Option processing`_. - - .. method:: get_tokens(text) - - This method is the basic interface of a lexer. It is called by - the `highlight()` function. It must process the text and return an - iterable of ``(tokentype, value)`` pairs from `text`. - - Normally, you don't need to override this method. 
The default - implementation processes the `stripnl`, `stripall` and `tabsize` - options and then yields all tokens from `get_tokens_unprocessed()`, - with the ``index`` dropped. - - .. method:: get_tokens_unprocessed(text) - - This method should process the text and return an iterable of - ``(index, tokentype, value)`` tuples where ``index`` is the starting - position of the token within the input text. - - This method must be overridden by subclasses. - - .. staticmethod:: analyse_text(text) - - A static method which is called for lexer guessing. It should analyse - the text and return a float in the range from ``0.0`` to ``1.0``. - If it returns ``0.0``, the lexer will not be selected as the most - probable one, if it returns ``1.0``, it will be selected immediately. - - .. note:: You don't have to add ``@staticmethod`` to the definition of - this method, this will be taken care of by the Lexer's metaclass. - - For a list of known tokens have a look at the :doc:`tokens` page. - - A lexer also can have the following attributes (in fact, they are mandatory - except `alias_filenames`) that are used by the builtin lookup mechanism. - - .. attribute:: name - - Full name for the lexer, in human-readable form. - - .. attribute:: aliases - - A list of short, unique identifiers that can be used to lookup - the lexer from a list, e.g. using `get_lexer_by_name()`. - - .. attribute:: filenames - - A list of `fnmatch` patterns that match filenames which contain - content for this lexer. The patterns in this list should be unique among - all lexers. - - .. attribute:: alias_filenames - - A list of `fnmatch` patterns that match filenames which may or may not - contain content for this lexer. This list is used by the - :func:`.guess_lexer_for_filename()` function, to determine which lexers - are then included in guessing the correct one. That means that - e.g. every lexer for HTML and a template language should include - ``\*.html`` in this list. - - .. attribute:: mimetypes - - A list of MIME types for content that can be lexed with this - lexer. - - -.. module:: pygments.formatter - -Formatters -========== - -A formatter is derived from this class: - - -.. class:: Formatter(**options) - - As with lexers, this constructor processes options and then must call the - base class :meth:`__init__`. - - The :class:`Formatter` class recognizes the options `style`, `full` and - `title`. It is up to the formatter class whether it uses them. - - .. method:: get_style_defs(arg='') - - This method must return statements or declarations suitable to define - the current style for subsequent highlighted text (e.g. CSS classes - in the `HTMLFormatter`). - - The optional argument `arg` can be used to modify the generation and - is formatter dependent (it is standardized because it can be given on - the command line). - - This method is called by the ``-S`` :doc:`command-line option `, - the `arg` is then given by the ``-a`` option. - - .. method:: format(tokensource, outfile) - - This method must format the tokens from the `tokensource` iterable and - write the formatted version to the file object `outfile`. - - Formatter options can control how exactly the tokens are converted. - - .. versionadded:: 0.7 - A formatter must have the following attributes that are used by the - builtin lookup mechanism. - - .. attribute:: name - - Full name for the formatter, in human-readable form. - - .. attribute:: aliases - - A list of short, unique identifiers that can be used to lookup - the formatter from a list, e.g. 
using :func:`.get_formatter_by_name()`. - - .. attribute:: filenames - - A list of :mod:`fnmatch` patterns that match filenames for which this - formatter can produce output. The patterns in this list should be unique - among all formatters. - - -.. module:: pygments.util - -Option processing -================= - -The :mod:`pygments.util` module has some utility functions usable for option -processing: - -.. exception:: OptionError - - This exception will be raised by all option processing functions if - the type or value of the argument is not correct. - -.. function:: get_bool_opt(options, optname, default=None) - - Interpret the key `optname` from the dictionary `options` as a boolean and - return it. Return `default` if `optname` is not in `options`. - - The valid string values for ``True`` are ``1``, ``yes``, ``true`` and - ``on``, the ones for ``False`` are ``0``, ``no``, ``false`` and ``off`` - (matched case-insensitively). - -.. function:: get_int_opt(options, optname, default=None) - - As :func:`get_bool_opt`, but interpret the value as an integer. - -.. function:: get_list_opt(options, optname, default=None) - - If the key `optname` from the dictionary `options` is a string, - split it at whitespace and return it. If it is already a list - or a tuple, it is returned as a list. - -.. function:: get_choice_opt(options, optname, allowed, default=None) - - If the key `optname` from the dictionary is not in the sequence - `allowed`, raise an error, otherwise return it. - - .. versionadded:: 0.8 +.. -*- mode: rst -*- + +===================== +The full Pygments API +===================== + +This page describes the Pygments API. + +High-level API +============== + +.. module:: pygments + +Functions from the :mod:`pygments` module: + +.. function:: lex(code, lexer) + + Lex `code` with the `lexer` (must be a `Lexer` instance) + and return an iterable of tokens. Currently, this only calls + `lexer.get_tokens()`. + +.. function:: format(tokens, formatter, outfile=None) + + Format a token stream (iterable of tokens) `tokens` with the + `formatter` (must be a `Formatter` instance). The result is + written to `outfile`, or if that is ``None``, returned as a + string. + +.. function:: highlight(code, lexer, formatter, outfile=None) + + This is the most high-level highlighting function. + It combines `lex` and `format` in one function. + + +.. module:: pygments.lexers + +Functions from :mod:`pygments.lexers`: + +.. function:: get_lexer_by_name(alias, **options) + + Return an instance of a `Lexer` subclass that has `alias` in its + aliases list. The lexer is given the `options` at its + instantiation. + + Will raise :exc:`pygments.util.ClassNotFound` if no lexer with that alias is + found. + +.. function:: get_lexer_for_filename(fn, **options) + + Return a `Lexer` subclass instance that has a filename pattern + matching `fn`. The lexer is given the `options` at its + instantiation. + + Will raise :exc:`pygments.util.ClassNotFound` if no lexer for that filename + is found. + +.. function:: get_lexer_for_mimetype(mime, **options) + + Return a `Lexer` subclass instance that has `mime` in its mimetype + list. The lexer is given the `options` at its instantiation. + + Will raise :exc:`pygments.util.ClassNotFound` if no lexer for that mimetype + is found. + +.. function:: load_lexer_from_file(filename, lexername="CustomLexer", **options) + + Return a `Lexer` subclass instance loaded from the provided file, relative + to the current directory. The file is expected to contain a Lexer class + named `lexername` (by default, CustomLexer). Users should be very careful with + the input, because this method is equivalent to running eval on the input file. + The lexer is given the `options` at its instantiation. + + :exc:`ClassNotFound` is raised if there are any errors loading the Lexer + + .. versionadded:: 2.2 + +.. function:: guess_lexer(text, **options) + + Return a `Lexer` subclass instance that's guessed from the text in + `text`. For that, the :meth:`.analyse_text()` method of every known lexer + class is called with the text as argument, and the lexer which returned the + highest value will be instantiated and returned. + + :exc:`pygments.util.ClassNotFound` is raised if no lexer thinks it can + handle the content.
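+
+   For example, a quick sketch of the guessing API in action (the exact
+   lexer chosen depends on the lexers registered in your installation):
+
+   .. sourcecode:: python
+
+      from pygments.lexers import guess_lexer
+
+      lexer = guess_lexer('#!/usr/bin/env python\nprint("hello")')
+      print(lexer.name)   # most likely 'Python'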
+ +.. function:: guess_lexer_for_filename(filename, text, **options) + + As :func:`guess_lexer()`, but only lexers which have a pattern in `filenames` + or `alias_filenames` that matches `filename` are taken into consideration. + + :exc:`pygments.util.ClassNotFound` is raised if no lexer thinks it can + handle the content. + +.. function:: get_all_lexers() + + Return an iterable over all registered lexers, yielding tuples in the + format:: + + (longname, tuple of aliases, tuple of filename patterns, tuple of mimetypes) + + .. versionadded:: 0.6 + +.. function:: find_lexer_class_by_name(alias) + + Return the `Lexer` subclass that has `alias` in its aliases list, without + instantiating it. + + Will raise :exc:`pygments.util.ClassNotFound` if no lexer with that alias is + found. + + .. versionadded:: 2.2 + +.. function:: find_lexer_class(name) + + Return the `Lexer` subclass with the *name* attribute given by + the *name* argument. + + +.. module:: pygments.formatters + +Functions from :mod:`pygments.formatters`: + +.. function:: get_formatter_by_name(alias, **options) + + Return an instance of a :class:`.Formatter` subclass that has `alias` in its + aliases list. The formatter is given the `options` at its instantiation. + + Will raise :exc:`pygments.util.ClassNotFound` if no formatter with that + alias is found. + +.. function:: get_formatter_for_filename(fn, **options) + + Return a :class:`.Formatter` subclass instance that has a filename pattern + matching `fn`. The formatter is given the `options` at its instantiation. + + Will raise :exc:`pygments.util.ClassNotFound` if no formatter for that filename + is found. + +.. function:: load_formatter_from_file(filename, formattername="CustomFormatter", **options) + + Return a `Formatter` subclass instance loaded from the provided file, relative + to the current directory. The file is expected to contain a Formatter class + named ``formattername`` (by default, CustomFormatter). Users should be very + careful with the input, because this method is equivalent to running eval + on the input file. The formatter is given the `options` at its instantiation. + + :exc:`ClassNotFound` is raised if there are any errors loading the Formatter + + .. versionadded:: 2.2 + +.. module:: pygments.styles + +Functions from :mod:`pygments.styles`: + +.. function:: get_style_by_name(name) + + Return a style class by its short name. The names of the builtin styles + are listed in :data:`pygments.styles.STYLE_MAP`. + + Will raise :exc:`pygments.util.ClassNotFound` if no style of that name is + found. + +.. function:: get_all_styles() + + Return an iterable over all registered styles, yielding their names. + + .. versionadded:: 0.6 + + +..
module:: pygments.lexer + +Lexers +====== + +The base lexer class from which all lexers are derived is: + +.. class:: Lexer(**options) + + The constructor takes a \*\*keywords dictionary of options. + Every subclass must first process its own options and then call + the `Lexer` constructor, since it processes the `stripnl`, + `stripall` and `tabsize` options. + + An example looks like this: + + .. sourcecode:: python + + def __init__(self, **options): + self.compress = options.get('compress', '') + Lexer.__init__(self, **options) + + As these options must all be specifiable as strings (due to the + command line usage), there are various utility functions + available to help with that, see `Option processing`_. + + .. method:: get_tokens(text) + + This method is the basic interface of a lexer. It is called by + the `highlight()` function. It must process the text and return an + iterable of ``(tokentype, value)`` pairs from `text`. + + Normally, you don't need to override this method. The default + implementation processes the `stripnl`, `stripall` and `tabsize` + options and then yields all tokens from `get_tokens_unprocessed()`, + with the ``index`` dropped. + + .. method:: get_tokens_unprocessed(text) + + This method should process the text and return an iterable of + ``(index, tokentype, value)`` tuples where ``index`` is the starting + position of the token within the input text. + + This method must be overridden by subclasses. + + .. staticmethod:: analyse_text(text) + + A static method which is called for lexer guessing. It should analyse + the text and return a float in the range from ``0.0`` to ``1.0``. + If it returns ``0.0``, the lexer will not be selected as the most + probable one, if it returns ``1.0``, it will be selected immediately. + + .. note:: You don't have to add ``@staticmethod`` to the definition of + this method, this will be taken care of by the Lexer's metaclass. + + For a list of known tokens have a look at the :doc:`tokens` page. + + A lexer also can have the following attributes (in fact, they are mandatory + except `alias_filenames`) that are used by the builtin lookup mechanism. + + .. attribute:: name + + Full name for the lexer, in human-readable form. + + .. attribute:: aliases + + A list of short, unique identifiers that can be used to lookup + the lexer from a list, e.g. using `get_lexer_by_name()`. + + .. attribute:: filenames + + A list of `fnmatch` patterns that match filenames which contain + content for this lexer. The patterns in this list should be unique among + all lexers. + + .. attribute:: alias_filenames + + A list of `fnmatch` patterns that match filenames which may or may not + contain content for this lexer. This list is used by the + :func:`.guess_lexer_for_filename()` function, to determine which lexers + are then included in guessing the correct one. That means that + e.g. every lexer for HTML and a template language should include + ``\*.html`` in this list. + + .. attribute:: mimetypes + + A list of MIME types for content that can be lexed with this + lexer. + + +.. module:: pygments.formatter + +Formatters +========== + +A formatter is derived from this class: + + +.. class:: Formatter(**options) + + As with lexers, this constructor processes options and then must call the + base class :meth:`__init__`. + + The :class:`Formatter` class recognizes the options `style`, `full` and + `title`. It is up to the formatter class whether it uses them. + + .. 
method:: get_style_defs(arg='') + + This method must return statements or declarations suitable to define + the current style for subsequent highlighted text (e.g. CSS classes + in the `HTMLFormatter`). + + The optional argument `arg` can be used to modify the generation and + is formatter dependent (it is standardized because it can be given on + the command line). + + This method is called by the ``-S`` :doc:`command-line option <cmdline>`; + the `arg` is then given by the ``-a`` option. + + .. method:: format(tokensource, outfile) + + This method must format the tokens from the `tokensource` iterable and + write the formatted version to the file object `outfile`. + + Formatter options can control how exactly the tokens are converted. + + .. versionadded:: 0.7 + A formatter must have the following attributes that are used by the + builtin lookup mechanism. + + .. attribute:: name + + Full name for the formatter, in human-readable form. + + .. attribute:: aliases + + A list of short, unique identifiers that can be used to lookup + the formatter from a list, e.g. using :func:`.get_formatter_by_name()`. + + .. attribute:: filenames + + A list of :mod:`fnmatch` patterns that match filenames for which this + formatter can produce output. The patterns in this list should be unique + among all formatters. + + +.. module:: pygments.util + +Option processing +================= + +The :mod:`pygments.util` module has some utility functions usable for option +processing: + +.. exception:: OptionError + + This exception will be raised by all option processing functions if + the type or value of the argument is not correct. + +.. function:: get_bool_opt(options, optname, default=None) + + Interpret the key `optname` from the dictionary `options` as a boolean and + return it. Return `default` if `optname` is not in `options`. + + The valid string values for ``True`` are ``1``, ``yes``, ``true`` and + ``on``, the ones for ``False`` are ``0``, ``no``, ``false`` and ``off`` + (matched case-insensitively). + +.. function:: get_int_opt(options, optname, default=None) + + As :func:`get_bool_opt`, but interpret the value as an integer. + +.. function:: get_list_opt(options, optname, default=None) + + If the key `optname` from the dictionary `options` is a string, + split it at whitespace and return it. If it is already a list + or a tuple, it is returned as a list. + +.. function:: get_choice_opt(options, optname, allowed, default=None) + + If the key `optname` from the dictionary is not in the sequence + `allowed`, raise an error, otherwise return it. + + .. versionadded:: 0.8 diff --git a/doc/docs/authors.rst b/doc/docs/authors.rst old mode 100644 new mode 100755 index f8373f0..520211f --- a/doc/docs/authors.rst +++ b/doc/docs/authors.rst @@ -1,4 +1,4 @@ -Full contributor list -===================== - -.. include:: ../../AUTHORS +Full contributor list +===================== + +.. include:: ../../AUTHORS diff --git a/doc/docs/changelog.rst b/doc/docs/changelog.rst old mode 100644 new mode 100755 index f264cab..120c054 --- a/doc/docs/changelog.rst +++ b/doc/docs/changelog.rst @@ -1 +1 @@ -.. include:: ../../CHANGES +.. include:: ../../CHANGES diff --git a/doc/docs/cmdline.rst b/doc/docs/cmdline.rst old mode 100644 new mode 100755 index e4f94ea..7e8bfc8 --- a/doc/docs/cmdline.rst +++ b/doc/docs/cmdline.rst @@ -1,166 +1,166 @@ -..
-*- mode: rst -*- - -====================== -Command Line Interface -====================== - -You can use Pygments from the shell, provided you installed the -:program:`pygmentize` script:: - - $ pygmentize test.py - print "Hello World" - -will print the file test.py to standard output, using the Python lexer -(inferred from the file name extension) and the terminal formatter (because -you didn't give an explicit formatter name). - -If you want HTML output:: - - $ pygmentize -f html -l python -o test.html test.py - -As you can see, the -l option explicitly selects a lexer. As seen above, if you -give an input file name and it has an extension that Pygments recognizes, you can -omit this option. - -The ``-o`` option gives an output file name. If it is not given, output is -written to stdout. - -The ``-f`` option selects a formatter (as with ``-l``, it can also be omitted -if an output file name is given and has a supported extension). -If no output file name is given and ``-f`` is omitted, the -:class:`.TerminalFormatter` is used. - -The above command could therefore also be given as:: - - $ pygmentize -o test.html test.py - -To create a full HTML document, including line numbers and stylesheet (using the -"emacs" style), highlighting the Python file ``test.py`` to ``test.html``:: - - $ pygmentize -O full,style=emacs -o test.html test.py - - -Options and filters -------------------- - -Lexer and formatter options can be given using the ``-O`` option:: - - $ pygmentize -f html -O style=colorful,linenos=1 -l python test.py - -Be sure to enclose the option string in quotes if it contains any special shell -characters, such as spaces or expansion wildcards like ``*``. If an option -expects a list value, separate the list entries with spaces (you'll have to -quote the option value in this case too, so that the shell doesn't split it). - -Since the ``-O`` option argument is split at commas and expects the split values -to be of the form ``name=value``, you can't give an option value that contains -commas or equals signs. Therefore, an option ``-P`` is provided (as of Pygments -0.9) that works like ``-O`` but can only pass one option per ``-P``. Its value -can then contain all characters:: - - $ pygmentize -P "heading=Pygments, the Python highlighter" ... - -Filters are added to the token stream using the ``-F`` option:: - - $ pygmentize -f html -l pascal -F keywordcase:case=upper main.pas - -As you see, options for the filter are given after a colon. As for ``-O``, the -filter name and options must be one shell word, so there may not be any spaces -around the colon. - - -Generating styles ------------------ - -Formatters normally don't output full style information. For example, the HTML -formatter by default only outputs ```` tags with ``class`` attributes. -Therefore, there's a special ``-S`` option for generating style definitions. -Usage is as follows:: - - $ pygmentize -f html -S colorful -a .syntax - -generates a CSS style sheet (because you selected the HTML formatter) for -the "colorful" style prepending a ".syntax" selector to all style rules. - -For an explanation what ``-a`` means for :doc:`a particular formatter -`, look for the `arg` argument for the formatter's -:meth:`.get_style_defs()` method. - - -Getting lexer names -------------------- - -.. versionadded:: 1.0 - -The ``-N`` option guesses a lexer name for a given filename, so that :: - - $ pygmentize -N setup.py - -will print out ``python``. It won't highlight anything yet. 
If no specific -lexer is known for that filename, ``text`` is printed. - -Custom Lexers and Formatters ----------------------------- - -.. versionadded:: 2.2 - -The ``-x`` flag enables custom lexers and formatters to be loaded -from files relative to the current directory. Create a file with a class named -CustomLexer or CustomFormatter, then specify it on the command line:: - - $ pygmentize -l your_lexer.py -f your_formatter.py -x - -You can also specify the name of your class with a colon:: - - $ pygmentize -l your_lexer.py:SomeLexer -x - -For more information, see :doc:`the Pygments documentation on Lexer development -`. - -Getting help ------------- - -The ``-L`` option lists lexers, formatters, along with their short -names and supported file name extensions, styles and filters. If you want to see -only one category, give it as an argument:: - - $ pygmentize -L filters - -will list only all installed filters. - -The ``-H`` option will give you detailed information (the same that can be found -in this documentation) about a lexer, formatter or filter. Usage is as follows:: - - $ pygmentize -H formatter html - -will print the help for the HTML formatter, while :: - - $ pygmentize -H lexer python - -will print the help for the Python lexer, etc. - - -A note on encodings -------------------- - -.. versionadded:: 0.9 - -Pygments tries to be smart regarding encodings in the formatting process: - -* If you give an ``encoding`` option, it will be used as the input and - output encoding. - -* If you give an ``outencoding`` option, it will override ``encoding`` - as the output encoding. - -* If you give an ``inencoding`` option, it will override ``encoding`` - as the input encoding. - -* If you don't give an encoding and have given an output file, the default - encoding for lexer and formatter is the terminal encoding or the default - locale encoding of the system. As a last resort, ``latin1`` is used (which - will pass through all non-ASCII characters). - -* If you don't give an encoding and haven't given an output file (that means - output is written to the console), the default encoding for lexer and - formatter is the terminal encoding (``sys.stdout.encoding``). +.. -*- mode: rst -*- + +====================== +Command Line Interface +====================== + +You can use Pygments from the shell, provided you installed the +:program:`pygmentize` script:: + + $ pygmentize test.py + print "Hello World" + +will print the file test.py to standard output, using the Python lexer +(inferred from the file name extension) and the terminal formatter (because +you didn't give an explicit formatter name). + +If you want HTML output:: + + $ pygmentize -f html -l python -o test.html test.py + +As you can see, the -l option explicitly selects a lexer. As seen above, if you +give an input file name and it has an extension that Pygments recognizes, you can +omit this option. + +The ``-o`` option gives an output file name. If it is not given, output is +written to stdout. + +The ``-f`` option selects a formatter (as with ``-l``, it can also be omitted +if an output file name is given and has a supported extension). +If no output file name is given and ``-f`` is omitted, the +:class:`.TerminalFormatter` is used. 
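+
+For comparison, the same conversion can be scripted with the library API (a
+minimal sketch using the high-level functions documented in :doc:`api`):
+
+.. sourcecode:: python
+
+   from pygments import highlight
+   from pygments.formatters import HtmlFormatter
+   from pygments.lexers import get_lexer_for_filename
+
+   with open('test.py') as infile:
+       code = infile.read()
+   # infer the lexer from the file name, as pygmentize does
+   lexer = get_lexer_for_filename('test.py')
+   with open('test.html', 'w') as outfile:
+       outfile.write(highlight(code, lexer, HtmlFormatter()))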
+ +The above command could therefore also be given as:: + + $ pygmentize -o test.html test.py + +To create a full HTML document, including line numbers and stylesheet (using the +"emacs" style), highlighting the Python file ``test.py`` to ``test.html``:: + + $ pygmentize -O full,style=emacs -o test.html test.py + + +Options and filters +------------------- + +Lexer and formatter options can be given using the ``-O`` option:: + + $ pygmentize -f html -O style=colorful,linenos=1 -l python test.py + +Be sure to enclose the option string in quotes if it contains any special shell +characters, such as spaces or expansion wildcards like ``*``. If an option +expects a list value, separate the list entries with spaces (you'll have to +quote the option value in this case too, so that the shell doesn't split it). + +Since the ``-O`` option argument is split at commas and expects the split values +to be of the form ``name=value``, you can't give an option value that contains +commas or equals signs. Therefore, an option ``-P`` is provided (as of Pygments +0.9) that works like ``-O`` but can only pass one option per ``-P``. Its value +can then contain all characters:: + + $ pygmentize -P "heading=Pygments, the Python highlighter" ... + +Filters are added to the token stream using the ``-F`` option:: + + $ pygmentize -f html -l pascal -F keywordcase:case=upper main.pas + +As you see, options for the filter are given after a colon. As for ``-O``, the +filter name and options must be one shell word, so there may not be any spaces +around the colon. + + +Generating styles +----------------- + +Formatters normally don't output full style information. For example, the HTML +formatter by default only outputs ```` tags with ``class`` attributes. +Therefore, there's a special ``-S`` option for generating style definitions. +Usage is as follows:: + + $ pygmentize -f html -S colorful -a .syntax + +generates a CSS style sheet (because you selected the HTML formatter) for +the "colorful" style prepending a ".syntax" selector to all style rules. + +For an explanation what ``-a`` means for :doc:`a particular formatter +`, look for the `arg` argument for the formatter's +:meth:`.get_style_defs()` method. + + +Getting lexer names +------------------- + +.. versionadded:: 1.0 + +The ``-N`` option guesses a lexer name for a given filename, so that :: + + $ pygmentize -N setup.py + +will print out ``python``. It won't highlight anything yet. If no specific +lexer is known for that filename, ``text`` is printed. + +Custom Lexers and Formatters +---------------------------- + +.. versionadded:: 2.2 + +The ``-x`` flag enables custom lexers and formatters to be loaded +from files relative to the current directory. Create a file with a class named +CustomLexer or CustomFormatter, then specify it on the command line:: + + $ pygmentize -l your_lexer.py -f your_formatter.py -x + +You can also specify the name of your class with a colon:: + + $ pygmentize -l your_lexer.py:SomeLexer -x + +For more information, see :doc:`the Pygments documentation on Lexer development +`. + +Getting help +------------ + +The ``-L`` option lists lexers, formatters, along with their short +names and supported file name extensions, styles and filters. If you want to see +only one category, give it as an argument:: + + $ pygmentize -L filters + +will list only all installed filters. + +The ``-H`` option will give you detailed information (the same that can be found +in this documentation) about a lexer, formatter or filter. 
Usage is as follows:: + + $ pygmentize -H formatter html + +will print the help for the HTML formatter, while :: + + $ pygmentize -H lexer python + +will print the help for the Python lexer, etc. + + +A note on encodings +------------------- + +.. versionadded:: 0.9 + +Pygments tries to be smart regarding encodings in the formatting process: + +* If you give an ``encoding`` option, it will be used as the input and + output encoding. + +* If you give an ``outencoding`` option, it will override ``encoding`` + as the output encoding. + +* If you give an ``inencoding`` option, it will override ``encoding`` + as the input encoding. + +* If you don't give an encoding and have given an output file, the default + encoding for lexer and formatter is the terminal encoding or the default + locale encoding of the system. As a last resort, ``latin1`` is used (which + will pass through all non-ASCII characters). + +* If you don't give an encoding and haven't given an output file (that means + output is written to the console), the default encoding for lexer and + formatter is the terminal encoding (``sys.stdout.encoding``). diff --git a/doc/docs/filterdevelopment.rst b/doc/docs/filterdevelopment.rst old mode 100644 new mode 100755 index fbcd0a0..8696da8 --- a/doc/docs/filterdevelopment.rst +++ b/doc/docs/filterdevelopment.rst @@ -1,71 +1,71 @@ -.. -*- mode: rst -*- - -===================== -Write your own filter -===================== - -.. versionadded:: 0.7 - -Writing own filters is very easy. All you have to do is to subclass -the `Filter` class and override the `filter` method. Additionally a -filter is instantiated with some keyword arguments you can use to -adjust the behavior of your filter. - - -Subclassing Filters -=================== - -As an example, we write a filter that converts all `Name.Function` tokens -to normal `Name` tokens to make the output less colorful. - -.. sourcecode:: python - - from pygments.util import get_bool_opt - from pygments.token import Name - from pygments.filter import Filter - - class UncolorFilter(Filter): - - def __init__(self, **options): - Filter.__init__(self, **options) - self.class_too = get_bool_opt(options, 'classtoo') - - def filter(self, lexer, stream): - for ttype, value in stream: - if ttype is Name.Function or (self.class_too and - ttype is Name.Class): - ttype = Name - yield ttype, value - -Some notes on the `lexer` argument: that can be quite confusing since it doesn't -need to be a lexer instance. If a filter was added by using the `add_filter()` -function of lexers, that lexer is registered for the filter. In that case -`lexer` will refer to the lexer that has registered the filter. It *can* be used -to access options passed to a lexer. Because it could be `None` you always have -to check for that case if you access it. - - -Using a decorator -================= - -You can also use the `simplefilter` decorator from the `pygments.filter` module: - -.. sourcecode:: python - - from pygments.util import get_bool_opt - from pygments.token import Name - from pygments.filter import simplefilter - - - @simplefilter - def uncolor(self, lexer, stream, options): - class_too = get_bool_opt(options, 'classtoo') - for ttype, value in stream: - if ttype is Name.Function or (class_too and - ttype is Name.Class): - ttype = Name - yield ttype, value - -The decorator automatically subclasses an internal filter class and uses the -decorated function as a method for filtering. (That's why there is a `self` -argument that you probably won't end up using in the method.) 
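
Either way of writing the filter, it is attached to a lexer in the same way.
A short usage sketch, assuming the ``UncolorFilter`` class from above is in
scope:

.. sourcecode:: python

    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import HtmlFormatter

    lexer = PythonLexer()
    # keyword arguments are forwarded to the filter's constructor
    lexer.add_filter(UncolorFilter(classtoo=True))

    print(highlight('def f(): pass', lexer, HtmlFormatter()))
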
+.. -*- mode: rst -*- + +===================== +Write your own filter +===================== + +.. versionadded:: 0.7 + +Writing own filters is very easy. All you have to do is to subclass +the `Filter` class and override the `filter` method. Additionally a +filter is instantiated with some keyword arguments you can use to +adjust the behavior of your filter. + + +Subclassing Filters +=================== + +As an example, we write a filter that converts all `Name.Function` tokens +to normal `Name` tokens to make the output less colorful. + +.. sourcecode:: python + + from pygments.util import get_bool_opt + from pygments.token import Name + from pygments.filter import Filter + + class UncolorFilter(Filter): + + def __init__(self, **options): + Filter.__init__(self, **options) + self.class_too = get_bool_opt(options, 'classtoo') + + def filter(self, lexer, stream): + for ttype, value in stream: + if ttype is Name.Function or (self.class_too and + ttype is Name.Class): + ttype = Name + yield ttype, value + +Some notes on the `lexer` argument: that can be quite confusing since it doesn't +need to be a lexer instance. If a filter was added by using the `add_filter()` +function of lexers, that lexer is registered for the filter. In that case +`lexer` will refer to the lexer that has registered the filter. It *can* be used +to access options passed to a lexer. Because it could be `None` you always have +to check for that case if you access it. + + +Using a decorator +================= + +You can also use the `simplefilter` decorator from the `pygments.filter` module: + +.. sourcecode:: python + + from pygments.util import get_bool_opt + from pygments.token import Name + from pygments.filter import simplefilter + + + @simplefilter + def uncolor(self, lexer, stream, options): + class_too = get_bool_opt(options, 'classtoo') + for ttype, value in stream: + if ttype is Name.Function or (class_too and + ttype is Name.Class): + ttype = Name + yield ttype, value + +The decorator automatically subclasses an internal filter class and uses the +decorated function as a method for filtering. (That's why there is a `self` +argument that you probably won't end up using in the method.) diff --git a/doc/docs/filters.rst b/doc/docs/filters.rst old mode 100644 new mode 100755 index ff2519a..0de3432 --- a/doc/docs/filters.rst +++ b/doc/docs/filters.rst @@ -1,41 +1,41 @@ -.. -*- mode: rst -*- - -======= -Filters -======= - -.. versionadded:: 0.7 - -You can filter token streams coming from lexers to improve or annotate the -output. For example, you can highlight special words in comments, convert -keywords to upper or lowercase to enforce a style guide etc. - -To apply a filter, you can use the `add_filter()` method of a lexer: - -.. sourcecode:: pycon - - >>> from pygments.lexers import PythonLexer - >>> l = PythonLexer() - >>> # add a filter given by a string and options - >>> l.add_filter('codetagify', case='lower') - >>> l.filters - [] - >>> from pygments.filters import KeywordCaseFilter - >>> # or give an instance - >>> l.add_filter(KeywordCaseFilter(case='lower')) - -The `add_filter()` method takes keyword arguments which are forwarded to -the constructor of the filter. - -To get a list of all registered filters by name, you can use the -`get_all_filters()` function from the `pygments.filters` module that returns an -iterable for all known filters. - -If you want to write your own filter, have a look at :doc:`Write your own filter -`. - - -Builtin Filters -=============== - -.. pygmentsdoc:: filters +.. 
-*- mode: rst -*- + +======= +Filters +======= + +.. versionadded:: 0.7 + +You can filter token streams coming from lexers to improve or annotate the +output. For example, you can highlight special words in comments, convert +keywords to upper or lowercase to enforce a style guide etc. + +To apply a filter, you can use the `add_filter()` method of a lexer: + +.. sourcecode:: pycon + + >>> from pygments.lexers import PythonLexer + >>> l = PythonLexer() + >>> # add a filter given by a string and options + >>> l.add_filter('codetagify', case='lower') + >>> l.filters + [] + >>> from pygments.filters import KeywordCaseFilter + >>> # or give an instance + >>> l.add_filter(KeywordCaseFilter(case='lower')) + +The `add_filter()` method takes keyword arguments which are forwarded to +the constructor of the filter. + +To get a list of all registered filters by name, you can use the +`get_all_filters()` function from the `pygments.filters` module that returns an +iterable for all known filters. + +If you want to write your own filter, have a look at :doc:`Write your own filter +`. + + +Builtin Filters +=============== + +.. pygmentsdoc:: filters diff --git a/doc/docs/formatterdevelopment.rst b/doc/docs/formatterdevelopment.rst old mode 100644 new mode 100755 index 2bfac05..f257d48 --- a/doc/docs/formatterdevelopment.rst +++ b/doc/docs/formatterdevelopment.rst @@ -1,169 +1,169 @@ -.. -*- mode: rst -*- - -======================== -Write your own formatter -======================== - -As well as creating :doc:`your own lexer `, writing a new -formatter for Pygments is easy and straightforward. - -A formatter is a class that is initialized with some keyword arguments (the -formatter options) and that must provides a `format()` method. -Additionally a formatter should provide a `get_style_defs()` method that -returns the style definitions from the style in a form usable for the -formatter's output format. - - -Quickstart -========== - -The most basic formatter shipped with Pygments is the `NullFormatter`. It just -sends the value of a token to the output stream: - -.. sourcecode:: python - - from pygments.formatter import Formatter - - class NullFormatter(Formatter): - def format(self, tokensource, outfile): - for ttype, value in tokensource: - outfile.write(value) - -As you can see, the `format()` method is passed two parameters: `tokensource` -and `outfile`. The first is an iterable of ``(token_type, value)`` tuples, -the latter a file like object with a `write()` method. - -Because the formatter is that basic it doesn't overwrite the `get_style_defs()` -method. - - -Styles -====== - -Styles aren't instantiated but their metaclass provides some class functions -so that you can access the style definitions easily. - -Styles are iterable and yield tuples in the form ``(ttype, d)`` where `ttype` -is a token and `d` is a dict with the following keys: - -``'color'`` - Hexadecimal color value (eg: ``'ff0000'`` for red) or `None` if not - defined. - -``'bold'`` - `True` if the value should be bold - -``'italic'`` - `True` if the value should be italic - -``'underline'`` - `True` if the value should be underlined - -``'bgcolor'`` - Hexadecimal color value for the background (eg: ``'eeeeeee'`` for light - gray) or `None` if not defined. - -``'border'`` - Hexadecimal color value for the border (eg: ``'0000aa'`` for a dark - blue) or `None` for no border. - -Additional keys might appear in the future, formatters should ignore all keys -they don't support. 
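
To see these dictionaries concretely, you can iterate over any style class.
A small sketch, assuming the builtin ``'default'`` style is installed:

.. sourcecode:: python

    from pygments.styles import get_style_by_name

    style = get_style_by_name('default')
    for ttype, d in style:
        # d has the keys described above: 'color', 'bold', 'italic', ...
        if d['color']:
            print(ttype, d['color'], d['bold'])
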
-
-
-HTML 3.2 Formatter
-==================
-
-For an more complex example, let's implement a HTML 3.2 Formatter. We don't
-use CSS but inline markup (``<b>``, ``<i>``, etc). Because this isn't good
-style this formatter isn't in the standard library ;-)
-
-.. sourcecode:: python
-
-    from pygments.formatter import Formatter
-
-    class OldHtmlFormatter(Formatter):
-
-        def __init__(self, **options):
-            Formatter.__init__(self, **options)
-
-            # create a dict of (start, end) tuples that wrap the
-            # value of a token so that we can use it in the format
-            # method later
-            self.styles = {}
-
-            # we iterate over the `_styles` attribute of a style item
-            # that contains the parsed style values.
-            for token, style in self.style:
-                start = end = ''
-                # a style item is a tuple in the following form:
-                # colors are readily specified in hex: 'RRGGBB'
-                if style['color']:
-                    start += '<font color="#%s">' % style['color']
-                    end = '</font>' + end
-                if style['bold']:
-                    start += '<b>'
-                    end = '</b>' + end
-                if style['italic']:
-                    start += '<i>'
-                    end = '</i>' + end
-                if style['underline']:
-                    start += '<u>'
-                    end = '</u>' + end
-                self.styles[token] = (start, end)
-
-        def format(self, tokensource, outfile):
-            # lastval is a string we use for caching
-            # because it's possible that an lexer yields a number
-            # of consecutive tokens with the same token type.
-            # to minimize the size of the generated html markup we
-            # try to join the values of same-type tokens here
-            lastval = ''
-            lasttype = None
-
-            # wrap the whole output with <pre>
-            outfile.write('<pre>')
-
-            for ttype, value in tokensource:
-                # if the token type doesn't exist in the stylemap
-                # we try it with the parent of the token type
-                # eg: parent of Token.Literal.String.Double is
-                # Token.Literal.String
-                while ttype not in self.styles:
-                    ttype = ttype.parent
-                if ttype == lasttype:
-                    # the current token type is the same of the last
-                    # iteration. cache it
-                    lastval += value
-                else:
-                    # not the same token as last iteration, but we
-                    # have some data in the buffer. wrap it with the
-                    # defined style and write it to the output file
-                    if lastval:
-                        stylebegin, styleend = self.styles[lasttype]
-                        outfile.write(stylebegin + lastval + styleend)
-                    # set lastval/lasttype to current values
-                    lastval = value
-                    lasttype = ttype
-
-            # if something is left in the buffer, write it to the
-            # output file, then close the opened <pre> tag
-            if lastval:
-                stylebegin, styleend = self.styles[lasttype]
-                outfile.write(stylebegin + lastval + styleend)
-            outfile.write('</pre>
\n') - -The comments should explain it. Again, this formatter doesn't override the -`get_style_defs()` method. If we would have used CSS classes instead of -inline HTML markup, we would need to generate the CSS first. For that -purpose the `get_style_defs()` method exists: - - -Generating Style Definitions -============================ - -Some formatters like the `LatexFormatter` and the `HtmlFormatter` don't -output inline markup but reference either macros or css classes. Because -the definitions of those are not part of the output, the `get_style_defs()` -method exists. It is passed one parameter (if it's used and how it's used -is up to the formatter) and has to return a string or ``None``. +.. -*- mode: rst -*- + +======================== +Write your own formatter +======================== + +As well as creating :doc:`your own lexer `, writing a new +formatter for Pygments is easy and straightforward. + +A formatter is a class that is initialized with some keyword arguments (the +formatter options) and that must provides a `format()` method. +Additionally a formatter should provide a `get_style_defs()` method that +returns the style definitions from the style in a form usable for the +formatter's output format. + + +Quickstart +========== + +The most basic formatter shipped with Pygments is the `NullFormatter`. It just +sends the value of a token to the output stream: + +.. sourcecode:: python + + from pygments.formatter import Formatter + + class NullFormatter(Formatter): + def format(self, tokensource, outfile): + for ttype, value in tokensource: + outfile.write(value) + +As you can see, the `format()` method is passed two parameters: `tokensource` +and `outfile`. The first is an iterable of ``(token_type, value)`` tuples, +the latter a file like object with a `write()` method. + +Because the formatter is that basic it doesn't overwrite the `get_style_defs()` +method. + + +Styles +====== + +Styles aren't instantiated but their metaclass provides some class functions +so that you can access the style definitions easily. + +Styles are iterable and yield tuples in the form ``(ttype, d)`` where `ttype` +is a token and `d` is a dict with the following keys: + +``'color'`` + Hexadecimal color value (eg: ``'ff0000'`` for red) or `None` if not + defined. + +``'bold'`` + `True` if the value should be bold + +``'italic'`` + `True` if the value should be italic + +``'underline'`` + `True` if the value should be underlined + +``'bgcolor'`` + Hexadecimal color value for the background (eg: ``'eeeeeee'`` for light + gray) or `None` if not defined. + +``'border'`` + Hexadecimal color value for the border (eg: ``'0000aa'`` for a dark + blue) or `None` for no border. + +Additional keys might appear in the future, formatters should ignore all keys +they don't support. + + +HTML 3.2 Formatter +================== + +For an more complex example, let's implement a HTML 3.2 Formatter. We don't +use CSS but inline markup (````, ````, etc). Because this isn't good +style this formatter isn't in the standard library ;-) + +.. sourcecode:: python + + from pygments.formatter import Formatter + + class OldHtmlFormatter(Formatter): + + def __init__(self, **options): + Formatter.__init__(self, **options) + + # create a dict of (start, end) tuples that wrap the + # value of a token so that we can use it in the format + # method later + self.styles = {} + + # we iterate over the `_styles` attribute of a style item + # that contains the parsed style values. 
+            for token, style in self.style:
+                start = end = ''
+                # a style item is a tuple in the following form:
+                # colors are readily specified in hex: 'RRGGBB'
+                if style['color']:
+                    start += '<font color="#%s">' % style['color']
+                    end = '</font>' + end
+                if style['bold']:
+                    start += '<b>'
+                    end = '</b>' + end
+                if style['italic']:
+                    start += '<i>'
+                    end = '</i>' + end
+                if style['underline']:
+                    start += '<u>'
+                    end = '</u>' + end
+                self.styles[token] = (start, end)
+
+        def format(self, tokensource, outfile):
+            # lastval is a string we use for caching
+            # because it's possible that an lexer yields a number
+            # of consecutive tokens with the same token type.
+            # to minimize the size of the generated html markup we
+            # try to join the values of same-type tokens here
+            lastval = ''
+            lasttype = None
+
+            # wrap the whole output with <pre>
+            outfile.write('<pre>')
+
+            for ttype, value in tokensource:
+                # if the token type doesn't exist in the stylemap
+                # we try it with the parent of the token type
+                # eg: parent of Token.Literal.String.Double is
+                # Token.Literal.String
+                while ttype not in self.styles:
+                    ttype = ttype.parent
+                if ttype == lasttype:
+                    # the current token type is the same of the last
+                    # iteration. cache it
+                    lastval += value
+                else:
+                    # not the same token as last iteration, but we
+                    # have some data in the buffer. wrap it with the
+                    # defined style and write it to the output file
+                    if lastval:
+                        stylebegin, styleend = self.styles[lasttype]
+                        outfile.write(stylebegin + lastval + styleend)
+                    # set lastval/lasttype to current values
+                    lastval = value
+                    lasttype = ttype
+
+            # if something is left in the buffer, write it to the
+            # output file, then close the opened <pre> tag
+            if lastval:
+                stylebegin, styleend = self.styles[lasttype]
+                outfile.write(stylebegin + lastval + styleend)
+            outfile.write('</pre>
\n') + +The comments should explain it. Again, this formatter doesn't override the +`get_style_defs()` method. If we would have used CSS classes instead of +inline HTML markup, we would need to generate the CSS first. For that +purpose the `get_style_defs()` method exists: + + +Generating Style Definitions +============================ + +Some formatters like the `LatexFormatter` and the `HtmlFormatter` don't +output inline markup but reference either macros or css classes. Because +the definitions of those are not part of the output, the `get_style_defs()` +method exists. It is passed one parameter (if it's used and how it's used +is up to the formatter) and has to return a string or ``None``. diff --git a/doc/docs/formatters.rst b/doc/docs/formatters.rst old mode 100644 new mode 100755 index 9e7074e..5d177e9 --- a/doc/docs/formatters.rst +++ b/doc/docs/formatters.rst @@ -1,48 +1,48 @@ -.. -*- mode: rst -*- - -==================== -Available formatters -==================== - -This page lists all builtin formatters. - -Common options -============== - -All formatters support these options: - -`encoding` - If given, must be an encoding name (such as ``"utf-8"``). This will - be used to convert the token strings (which are Unicode strings) - to byte strings in the output (default: ``None``). - It will also be written in an encoding declaration suitable for the - document format if the `full` option is given (e.g. a ``meta - content-type`` directive in HTML or an invocation of the `inputenc` - package in LaTeX). - - If this is ``""`` or ``None``, Unicode strings will be written - to the output file, which most file-like objects do not support. - For example, `pygments.highlight()` will return a Unicode string if - called with no `outfile` argument and a formatter that has `encoding` - set to ``None`` because it uses a `StringIO.StringIO` object that - supports Unicode arguments to `write()`. Using a regular file object - wouldn't work. - - .. versionadded:: 0.6 - -`outencoding` - When using Pygments from the command line, any `encoding` option given is - passed to the lexer and the formatter. This is sometimes not desirable, - for example if you want to set the input encoding to ``"guess"``. - Therefore, `outencoding` has been introduced which overrides `encoding` - for the formatter if given. - - .. versionadded:: 0.7 - - -Formatter classes -================= - -All these classes are importable from :mod:`pygments.formatters`. - -.. pygmentsdoc:: formatters +.. -*- mode: rst -*- + +==================== +Available formatters +==================== + +This page lists all builtin formatters. + +Common options +============== + +All formatters support these options: + +`encoding` + If given, must be an encoding name (such as ``"utf-8"``). This will + be used to convert the token strings (which are Unicode strings) + to byte strings in the output (default: ``None``). + It will also be written in an encoding declaration suitable for the + document format if the `full` option is given (e.g. a ``meta + content-type`` directive in HTML or an invocation of the `inputenc` + package in LaTeX). + + If this is ``""`` or ``None``, Unicode strings will be written + to the output file, which most file-like objects do not support. + For example, `pygments.highlight()` will return a Unicode string if + called with no `outfile` argument and a formatter that has `encoding` + set to ``None`` because it uses a `StringIO.StringIO` object that + supports Unicode arguments to `write()`. 
Using a regular file object + wouldn't work. + + .. versionadded:: 0.6 + +`outencoding` + When using Pygments from the command line, any `encoding` option given is + passed to the lexer and the formatter. This is sometimes not desirable, + for example if you want to set the input encoding to ``"guess"``. + Therefore, `outencoding` has been introduced which overrides `encoding` + for the formatter if given. + + .. versionadded:: 0.7 + + +Formatter classes +================= + +All these classes are importable from :mod:`pygments.formatters`. + +.. pygmentsdoc:: formatters diff --git a/doc/docs/index.rst b/doc/docs/index.rst old mode 100644 new mode 100755 index 1c96e62..9b606ae --- a/doc/docs/index.rst +++ b/doc/docs/index.rst @@ -1,61 +1,61 @@ -Pygments documentation -====================== - -**Starting with Pygments** - -.. toctree:: - :maxdepth: 1 - - ../download - quickstart - cmdline - -**Builtin components** - -.. toctree:: - :maxdepth: 1 - - lexers - filters - formatters - styles - -**Reference** - -.. toctree:: - :maxdepth: 1 - - unicode - tokens - api - -**Hacking for Pygments** - -.. toctree:: - :maxdepth: 1 - - lexerdevelopment - formatterdevelopment - filterdevelopment - plugins - -**Hints and tricks** - -.. toctree:: - :maxdepth: 1 - - rstdirective - moinmoin - java - integrate - -**About Pygments** - -.. toctree:: - :maxdepth: 1 - - changelog - authors - -If you find bugs or have suggestions for the documentation, please submit them -on `GitHub `_. +Pygments documentation +====================== + +**Starting with Pygments** + +.. toctree:: + :maxdepth: 1 + + ../download + quickstart + cmdline + +**Builtin components** + +.. toctree:: + :maxdepth: 1 + + lexers + filters + formatters + styles + +**Reference** + +.. toctree:: + :maxdepth: 1 + + unicode + tokens + api + +**Hacking for Pygments** + +.. toctree:: + :maxdepth: 1 + + lexerdevelopment + formatterdevelopment + filterdevelopment + plugins + +**Hints and tricks** + +.. toctree:: + :maxdepth: 1 + + rstdirective + moinmoin + java + integrate + +**About Pygments** + +.. toctree:: + :maxdepth: 1 + + changelog + authors + +If you find bugs or have suggestions for the documentation, please submit them +on `GitHub `_. diff --git a/doc/docs/integrate.rst b/doc/docs/integrate.rst old mode 100644 new mode 100755 index 5f266ac..06ff2b4 --- a/doc/docs/integrate.rst +++ b/doc/docs/integrate.rst @@ -1,40 +1,40 @@ -.. -*- mode: rst -*- - -=================================== -Using Pygments in various scenarios -=================================== - -Markdown --------- - -Since Pygments 0.9, the distribution ships Markdown_ preprocessor sample code -that uses Pygments to render source code in -:file:`external/markdown-processor.py`. You can copy and adapt it to your -liking. - -.. _Markdown: https://pypi.org/project/Markdown/ - -TextMate --------- - -Antonio Cangiano has created a Pygments bundle for TextMate that allows to -colorize code via a simple menu option. It can be found here_. - -.. _here: https://programmingzen.com/pygments-textmate-bundle/ - -Bash completion ---------------- - -The source distribution contains a file ``external/pygments.bashcomp`` that -sets up completion for the ``pygmentize`` command in bash. - -Wrappers for other languages ----------------------------- - -These libraries provide Pygments highlighting for users of other languages -than Python: - -* `pygments.rb `_, a pygments wrapper for Ruby -* `Clygments `_, a pygments wrapper for - Clojure -* `PHPygments `_, a pygments wrapper for PHP +.. 
-*- mode: rst -*- + +=================================== +Using Pygments in various scenarios +=================================== + +Markdown +-------- + +Since Pygments 0.9, the distribution ships Markdown_ preprocessor sample code +that uses Pygments to render source code in +:file:`external/markdown-processor.py`. You can copy and adapt it to your +liking. + +.. _Markdown: https://pypi.org/project/Markdown/ + +TextMate +-------- + +Antonio Cangiano has created a Pygments bundle for TextMate that allows to +colorize code via a simple menu option. It can be found here_. + +.. _here: https://programmingzen.com/pygments-textmate-bundle/ + +Bash completion +--------------- + +The source distribution contains a file ``external/pygments.bashcomp`` that +sets up completion for the ``pygmentize`` command in bash. + +Wrappers for other languages +---------------------------- + +These libraries provide Pygments highlighting for users of other languages +than Python: + +* `pygments.rb `_, a pygments wrapper for Ruby +* `Clygments `_, a pygments wrapper for + Clojure +* `PHPygments `_, a pygments wrapper for PHP diff --git a/doc/docs/java.rst b/doc/docs/java.rst old mode 100644 new mode 100755 index a8a5beb..9e1b7c7 --- a/doc/docs/java.rst +++ b/doc/docs/java.rst @@ -1,70 +1,70 @@ -===================== -Use Pygments in Java -===================== - -Thanks to `Jython `_ it is possible to use Pygments in -Java. - -This page is a simple tutorial to get an idea of how this works. You can -then look at the `Jython documentation `_ for more -advanced uses. - -Since version 1.5, Pygments is deployed on `Maven Central -`_ as a JAR, as is Jython -which makes it a lot easier to create a Java project. - -Here is an example of a `Maven `_ ``pom.xml`` file for a -project running Pygments: - -.. sourcecode:: xml - - - - - 4.0.0 - example - example - 1.0-SNAPSHOT - - - org.python - jython-standalone - 2.5.3 - - - org.pygments - pygments - 1.5 - runtime - - - - -The following Java example: - -.. sourcecode:: java - - PythonInterpreter interpreter = new PythonInterpreter(); - - // Set a variable with the content you want to work with - interpreter.set("code", code); - - // Simple use Pygments as you would in Python - interpreter.exec("from pygments import highlight\n" - + "from pygments.lexers import PythonLexer\n" - + "from pygments.formatters import HtmlFormatter\n" - + "\nresult = highlight(code, PythonLexer(), HtmlFormatter())"); - - // Get the result that has been set in a variable - System.out.println(interpreter.get("result", String.class)); - -will print something like: - -.. sourcecode:: html - -
-    <div class="highlight">
-    <pre><span class="k">print</span> <span class="s">&quot;Hello World&quot;</span>
-    </pre></div>
+===================== +Use Pygments in Java +===================== + +Thanks to `Jython `_ it is possible to use Pygments in +Java. + +This page is a simple tutorial to get an idea of how this works. You can +then look at the `Jython documentation `_ for more +advanced uses. + +Since version 1.5, Pygments is deployed on `Maven Central +`_ as a JAR, as is Jython +which makes it a lot easier to create a Java project. + +Here is an example of a `Maven `_ ``pom.xml`` file for a +project running Pygments: + +.. sourcecode:: xml + + + + + 4.0.0 + example + example + 1.0-SNAPSHOT + + + org.python + jython-standalone + 2.5.3 + + + org.pygments + pygments + 1.5 + runtime + + + + +The following Java example: + +.. sourcecode:: java + + PythonInterpreter interpreter = new PythonInterpreter(); + + // Set a variable with the content you want to work with + interpreter.set("code", code); + + // Simple use Pygments as you would in Python + interpreter.exec("from pygments import highlight\n" + + "from pygments.lexers import PythonLexer\n" + + "from pygments.formatters import HtmlFormatter\n" + + "\nresult = highlight(code, PythonLexer(), HtmlFormatter())"); + + // Get the result that has been set in a variable + System.out.println(interpreter.get("result", String.class)); + +will print something like: + +.. sourcecode:: html + +
+    <div class="highlight">
+    <pre><span class="k">print</span> <span class="s">&quot;Hello World&quot;</span>
+    </pre></div>
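
For reference, the program handed to ``interpreter.exec()`` above is ordinary
Pygments usage. Stripped of the Jython scaffolding, it is just this sketch
(``code`` stands for the string set from the Java side):

.. sourcecode:: python

    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import HtmlFormatter

    code = 'print "Hello World"'  # assumed sample input
    result = highlight(code, PythonLexer(), HtmlFormatter())
    print(result)
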
diff --git a/doc/docs/lexerdevelopment.rst b/doc/docs/lexerdevelopment.rst old mode 100644 new mode 100755 index c776457..3642d1d --- a/doc/docs/lexerdevelopment.rst +++ b/doc/docs/lexerdevelopment.rst @@ -1,729 +1,736 @@ -.. -*- mode: rst -*- - -.. highlight:: python - -==================== -Write your own lexer -==================== - -If a lexer for your favorite language is missing in the Pygments package, you -can easily write your own and extend Pygments. - -All you need can be found inside the :mod:`pygments.lexer` module. As you can -read in the :doc:`API documentation `, a lexer is a class that is -initialized with some keyword arguments (the lexer options) and that provides a -:meth:`.get_tokens_unprocessed()` method which is given a string or unicode -object with the data to lex. - -The :meth:`.get_tokens_unprocessed()` method must return an iterator or iterable -containing tuples in the form ``(index, token, value)``. Normally you don't -need to do this since there are base lexers that do most of the work and that -you can subclass. - - -RegexLexer -========== - -The lexer base class used by almost all of Pygments' lexers is the -:class:`RegexLexer`. This class allows you to define lexing rules in terms of -*regular expressions* for different *states*. - -States are groups of regular expressions that are matched against the input -string at the *current position*. If one of these expressions matches, a -corresponding action is performed (such as yielding a token with a specific -type, or changing state), the current position is set to where the last match -ended and the matching process continues with the first regex of the current -state. - -Lexer states are kept on a stack: each time a new state is entered, the new -state is pushed onto the stack. The most basic lexers (like the `DiffLexer`) -just need one state. - -Each state is defined as a list of tuples in the form (`regex`, `action`, -`new_state`) where the last item is optional. In the most basic form, `action` -is a token type (like `Name.Builtin`). That means: When `regex` matches, emit a -token with the match text and type `tokentype` and push `new_state` on the state -stack. If the new state is ``'#pop'``, the topmost state is popped from the -stack instead. To pop more than one state, use ``'#pop:2'`` and so on. -``'#push'`` is a synonym for pushing the current state on the stack. - -The following example shows the `DiffLexer` from the builtin lexers. Note that -it contains some additional attributes `name`, `aliases` and `filenames` which -aren't required for a lexer. They are used by the builtin lexer lookup -functions. :: - - from pygments.lexer import RegexLexer - from pygments.token import * - - class DiffLexer(RegexLexer): - name = 'Diff' - aliases = ['diff'] - filenames = ['*.diff'] - - tokens = { - 'root': [ - (r' .*\n', Text), - (r'\+.*\n', Generic.Inserted), - (r'-.*\n', Generic.Deleted), - (r'@.*\n', Generic.Subheading), - (r'Index.*\n', Generic.Heading), - (r'=.*\n', Generic.Heading), - (r'.*\n', Text), - ] - } - -As you can see this lexer only uses one state. When the lexer starts scanning -the text, it first checks if the current character is a space. If this is true -it scans everything until newline and returns the data as a `Text` token (which -is the "no special highlighting" token). - -If this rule doesn't match, it checks if the current char is a plus sign. And -so on. 
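
A quick way to check what a lexer like this emits is to print the token
stream for a small sample. A sketch using the builtin ``DiffLexer``:

.. sourcecode:: python

    from pygments.lexers import DiffLexer

    sample = '+added line\n-removed line\n'
    for ttype, value in DiffLexer().get_tokens(sample):
        # e.g. Token.Generic.Inserted '+added line\n'
        print(ttype, repr(value))
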
- -If no rule matches at the current position, the current char is emitted as an -`Error` token that indicates a lexing error, and the position is increased by -one. - - -Adding and testing a new lexer -============================== - -The easiest way to use a new lexer is to use Pygments' support for loading -the lexer from a file relative to your current directory. - -First, change the name of your lexer class to CustomLexer: - -.. code-block:: python - - from pygments.lexer import RegexLexer - from pygments.token import * - - class CustomLexer(RegexLexer): - """All your lexer code goes here!""" - -Then you can load the lexer from the command line with the additional -flag ``-x``: - -.. code-block:: console - - $ python -m pygments -l your_lexer_file.py -x - -To specify a class name other than CustomLexer, append it with a colon: - -.. code-block:: console - - $ python -m pygments -l your_lexer.py:SomeLexer -x - -Or, using the Python API: - -.. code-block:: python - - # For a lexer named CustomLexer - your_lexer = load_lexer_from_file(filename, **options) - - # For a lexer named MyNewLexer - your_named_lexer = load_lexer_from_file(filename, "MyNewLexer", **options) - -When loading custom lexers and formatters, be extremely careful to use only -trusted files; Pygments will perform the equivalent of ``eval`` on them. - -If you only want to use your lexer with the Pygments API, you can import and -instantiate the lexer yourself, then pass it to :func:`pygments.highlight`. - -To prepare your new lexer for inclusion in the Pygments distribution, so that it -will be found when passing filenames or lexer aliases from the command line, you -have to perform the following steps. - -First, change to the current directory containing the Pygments source code. You -will need to have either an unpacked source tarball, or (preferably) a copy -cloned from GitHub. - -.. code-block:: console - - $ cd pygments - -Select a matching module under ``pygments/lexers``, or create a new module for -your lexer class. - -Next, make sure the lexer is known from outside of the module. All modules in -the ``pygments.lexers`` package specify ``__all__``. For example, -``esoteric.py`` sets:: - - __all__ = ['BrainfuckLexer', 'BefungeLexer', ...] - -Add the name of your lexer class to this list (or create the list if your lexer -is the only class in the module). - -Finally the lexer can be made publicly known by rebuilding the lexer mapping: - -.. code-block:: console - - $ make mapfiles - -To test the new lexer, store an example file with the proper extension in -``tests/examplefiles``. For example, to test your ``DiffLexer``, add a -``tests/examplefiles/example.diff`` containing a sample diff output. - -Now you can use ``python -m pygments`` from the current root of the checkout to -render your example to HTML: - -.. code-block:: console - - $ python -m pygments -O full -f html -o /tmp/example.html tests/examplefiles/example.diff - -Note that this explicitly calls the ``pygments`` module in the current -directory. This ensures your modifications are used. Otherwise a possibly -already installed, unmodified version without your new lexer would have been -called from the system search path (``$PATH``). - -To view the result, open ``/tmp/example.html`` in your browser. - -Once the example renders as expected, you should run the complete test suite: - -.. 
code-block:: console - - $ make test - -It also tests that your lexer fulfills the lexer API and certain invariants, -such as that the concatenation of all token text is the same as the input text. - - -Regex Flags -=========== - -You can either define regex flags locally in the regex (``r'(?x)foo bar'``) or -globally by adding a `flags` attribute to your lexer class. If no attribute is -defined, it defaults to `re.MULTILINE`. For more information about regular -expression flags see the page about `regular expressions`_ in the Python -documentation. - -.. _regular expressions: https://docs.python.org/library/re.html#regular-expression-syntax - - -Scanning multiple tokens at once -================================ - -So far, the `action` element in the rule tuple of regex, action and state has -been a single token type. Now we look at the first of several other possible -values. - -Here is a more complex lexer that highlights INI files. INI files consist of -sections, comments and ``key = value`` pairs:: - - from pygments.lexer import RegexLexer, bygroups - from pygments.token import * - - class IniLexer(RegexLexer): - name = 'INI' - aliases = ['ini', 'cfg'] - filenames = ['*.ini', '*.cfg'] - - tokens = { - 'root': [ - (r'\s+', Text), - (r';.*?$', Comment), - (r'\[.*?\]$', Keyword), - (r'(.*?)(\s*)(=)(\s*)(.*?)$', - bygroups(Name.Attribute, Text, Operator, Text, String)) - ] - } - -The lexer first looks for whitespace, comments and section names. Later it -looks for a line that looks like a key, value pair, separated by an ``'='`` -sign, and optional whitespace. - -The `bygroups` helper yields each capturing group in the regex with a different -token type. First the `Name.Attribute` token, then a `Text` token for the -optional whitespace, after that a `Operator` token for the equals sign. Then a -`Text` token for the whitespace again. The rest of the line is returned as -`String`. - -Note that for this to work, every part of the match must be inside a capturing -group (a ``(...)``), and there must not be any nested capturing groups. If you -nevertheless need a group, use a non-capturing group defined using this syntax: -``(?:some|words|here)`` (note the ``?:`` after the beginning parenthesis). - -If you find yourself needing a capturing group inside the regex which shouldn't -be part of the output but is used in the regular expressions for backreferencing -(eg: ``r'(<(foo|bar)>)(.*?)()'``), you can pass `None` to the bygroups -function and that group will be skipped in the output. - - -Changing states -=============== - -Many lexers need multiple states to work as expected. For example, some -languages allow multiline comments to be nested. Since this is a recursive -pattern it's impossible to lex just using regular expressions. - -Here is a lexer that recognizes C++ style comments (multi-line with ``/* */`` -and single-line with ``//`` until end of line):: - - from pygments.lexer import RegexLexer - from pygments.token import * - - class CppCommentLexer(RegexLexer): - name = 'Example Lexer with states' - - tokens = { - 'root': [ - (r'[^/]+', Text), - (r'/\*', Comment.Multiline, 'comment'), - (r'//.*?$', Comment.Singleline), - (r'/', Text) - ], - 'comment': [ - (r'[^*/]', Comment.Multiline), - (r'/\*', Comment.Multiline, '#push'), - (r'\*/', Comment.Multiline, '#pop'), - (r'[*/]', Comment.Multiline) - ] - } - -This lexer starts lexing in the ``'root'`` state. It tries to match as much as -possible until it finds a slash (``'/'``). 
If the next character after the slash -is an asterisk (``'*'``) the `RegexLexer` sends those two characters to the -output stream marked as `Comment.Multiline` and continues lexing with the rules -defined in the ``'comment'`` state. - -If there wasn't an asterisk after the slash, the `RegexLexer` checks if it's a -Singleline comment (i.e. followed by a second slash). If this also wasn't the -case it must be a single slash, which is not a comment starter (the separate -regex for a single slash must also be given, else the slash would be marked as -an error token). - -Inside the ``'comment'`` state, we do the same thing again. Scan until the -lexer finds a star or slash. If it's the opening of a multiline comment, push -the ``'comment'`` state on the stack and continue scanning, again in the -``'comment'`` state. Else, check if it's the end of the multiline comment. If -yes, pop one state from the stack. - -Note: If you pop from an empty stack you'll get an `IndexError`. (There is an -easy way to prevent this from happening: don't ``'#pop'`` in the root state). - -If the `RegexLexer` encounters a newline that is flagged as an error token, the -stack is emptied and the lexer continues scanning in the ``'root'`` state. This -can help producing error-tolerant highlighting for erroneous input, e.g. when a -single-line string is not closed. - - -Advanced state tricks -===================== - -There are a few more things you can do with states: - -- You can push multiple states onto the stack if you give a tuple instead of a - simple string as the third item in a rule tuple. For example, if you want to - match a comment containing a directive, something like: - - .. code-block:: text - - /* rest of comment */ - - you can use this rule:: - - tokens = { - 'root': [ - (r'/\* <', Comment, ('comment', 'directive')), - ... - ], - 'directive': [ - (r'[^>]*', Comment.Directive), - (r'>', Comment, '#pop'), - ], - 'comment': [ - (r'[^*]+', Comment), - (r'\*/', Comment, '#pop'), - (r'\*', Comment), - ] - } - - When this encounters the above sample, first ``'comment'`` and ``'directive'`` - are pushed onto the stack, then the lexer continues in the directive state - until it finds the closing ``>``, then it continues in the comment state until - the closing ``*/``. Then, both states are popped from the stack again and - lexing continues in the root state. - - .. versionadded:: 0.9 - The tuple can contain the special ``'#push'`` and ``'#pop'`` (but not - ``'#pop:n'``) directives. - - -- You can include the rules of a state in the definition of another. This is - done by using `include` from `pygments.lexer`:: - - from pygments.lexer import RegexLexer, bygroups, include - from pygments.token import * - - class ExampleLexer(RegexLexer): - tokens = { - 'comments': [ - (r'/\*.*?\*/', Comment), - (r'//.*?\n', Comment), - ], - 'root': [ - include('comments'), - (r'(function )(\w+)( {)', - bygroups(Keyword, Name, Keyword), 'function'), - (r'.', Text), - ], - 'function': [ - (r'[^}/]+', Text), - include('comments'), - (r'/', Text), - (r'\}', Keyword, '#pop'), - ] - } - - This is a hypothetical lexer for a language that consist of functions and - comments. Because comments can occur at toplevel and in functions, we need - rules for comments in both states. As you can see, the `include` helper saves - repeating rules that occur more than once (in this example, the state - ``'comment'`` will never be entered by the lexer, as it's only there to be - included in ``'root'`` and ``'function'``). 
- -- Sometimes, you may want to "combine" a state from existing ones. This is - possible with the `combined` helper from `pygments.lexer`. - - If you, instead of a new state, write ``combined('state1', 'state2')`` as the - third item of a rule tuple, a new anonymous state will be formed from state1 - and state2 and if the rule matches, the lexer will enter this state. - - This is not used very often, but can be helpful in some cases, such as the - `PythonLexer`'s string literal processing. - -- If you want your lexer to start lexing in a different state you can modify the - stack by overriding the `get_tokens_unprocessed()` method:: - - from pygments.lexer import RegexLexer - - class ExampleLexer(RegexLexer): - tokens = {...} - - def get_tokens_unprocessed(self, text, stack=('root', 'otherstate')): - for item in RegexLexer.get_tokens_unprocessed(self, text, stack): - yield item - - Some lexers like the `PhpLexer` use this to make the leading ``', Name.Tag), - ], - 'script-content': [ - (r'(.+?)(<\s*/\s*script\s*>)', - bygroups(using(JavascriptLexer), Name.Tag), - '#pop'), - ] - } - -Here the content of a ```` end tag is processed by the `JavascriptLexer`, -while the end tag is yielded as a normal token with the `Name.Tag` type. - -Also note the ``(r'<\s*script\s*', Name.Tag, ('script-content', 'tag'))`` rule. -Here, two states are pushed onto the state stack, ``'script-content'`` and -``'tag'``. That means that first ``'tag'`` is processed, which will lex -attributes and the closing ``>``, then the ``'tag'`` state is popped and the -next state on top of the stack will be ``'script-content'``. - -Since you cannot refer to the class currently being defined, use `this` -(imported from `pygments.lexer`) to refer to the current lexer class, i.e. -``using(this)``. This construct may seem unnecessary, but this is often the -most obvious way of lexing arbitrary syntax between fixed delimiters without -introducing deeply nested states. - -The `using()` helper has a special keyword argument, `state`, which works as -follows: if given, the lexer to use initially is not in the ``"root"`` state, -but in the state given by this argument. This does not work with advanced -`RegexLexer` subclasses such as `ExtendedRegexLexer` (see below). - -Any other keywords arguments passed to `using()` are added to the keyword -arguments used to create the lexer. - - -Delegating Lexer -================ - -Another approach for nested lexers is the `DelegatingLexer` which is for example -used for the template engine lexers. It takes two lexers as arguments on -initialisation: a `root_lexer` and a `language_lexer`. - -The input is processed as follows: First, the whole text is lexed with the -`language_lexer`. All tokens yielded with the special type of ``Other`` are -then concatenated and given to the `root_lexer`. The language tokens of the -`language_lexer` are then inserted into the `root_lexer`'s token stream at the -appropriate positions. :: - - from pygments.lexer import DelegatingLexer - from pygments.lexers.web import HtmlLexer, PhpLexer - - class HtmlPhpLexer(DelegatingLexer): - def __init__(self, **options): - super(HtmlPhpLexer, self).__init__(HtmlLexer, PhpLexer, **options) - -This procedure ensures that e.g. HTML with template tags in it is highlighted -correctly even if the template tags are put into HTML tags or attributes. 
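
Using such a delegating lexer is then no different from using a builtin one.
A small sketch, assuming the ``HtmlPhpLexer`` defined above is in scope:

.. sourcecode:: python

    from pygments import highlight
    from pygments.formatters import HtmlFormatter

    text = '<html><?php echo "hi"; ?></html>'
    # the PHP island and the surrounding HTML each get proper highlighting
    print(highlight(text, HtmlPhpLexer(), HtmlFormatter()))
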
- -If you want to change the needle token ``Other`` to something else, you can give -the lexer another token type as the third parameter:: - - DelegatingLexer.__init__(MyLexer, OtherLexer, Text, **options) - - -Callbacks -========= - -Sometimes the grammar of a language is so complex that a lexer would be unable -to process it just by using regular expressions and stacks. - -For this, the `RegexLexer` allows callbacks to be given in rule tuples, instead -of token types (`bygroups` and `using` are nothing else but preimplemented -callbacks). The callback must be a function taking two arguments: - -* the lexer itself -* the match object for the last matched rule - -The callback must then return an iterable of (or simply yield) ``(index, -tokentype, value)`` tuples, which are then just passed through by -`get_tokens_unprocessed()`. The ``index`` here is the position of the token in -the input string, ``tokentype`` is the normal token type (like `Name.Builtin`), -and ``value`` the associated part of the input string. - -You can see an example here:: - - from pygments.lexer import RegexLexer - from pygments.token import Generic - - class HypotheticLexer(RegexLexer): - - def headline_callback(lexer, match): - equal_signs = match.group(1) - text = match.group(2) - yield match.start(), Generic.Headline, equal_signs + text + equal_signs - - tokens = { - 'root': [ - (r'(=+)(.*?)(\1)', headline_callback) - ] - } - -If the regex for the `headline_callback` matches, the function is called with -the match object. Note that after the callback is done, processing continues -normally, that is, after the end of the previous match. The callback has no -possibility to influence the position. - -There are not really any simple examples for lexer callbacks, but you can see -them in action e.g. in the `SMLLexer` class in `ml.py`_. - -.. _ml.py: https://github.com/pygments/pygments/blob/master/pygments/lexers/ml.py - - -The ExtendedRegexLexer class -============================ - -The `RegexLexer`, even with callbacks, unfortunately isn't powerful enough for -the funky syntax rules of languages such as Ruby. - -But fear not; even then you don't have to abandon the regular expression -approach: Pygments has a subclass of `RegexLexer`, the `ExtendedRegexLexer`. -All features known from RegexLexers are available here too, and the tokens are -specified in exactly the same way, *except* for one detail: - -The `get_tokens_unprocessed()` method holds its internal state data not as local -variables, but in an instance of the `pygments.lexer.LexerContext` class, and -that instance is passed to callbacks as a third argument. This means that you -can modify the lexer state in callbacks. - -The `LexerContext` class has the following members: - -* `text` -- the input text -* `pos` -- the current starting position that is used for matching regexes -* `stack` -- a list containing the state stack -* `end` -- the maximum position to which regexes are matched, this defaults to - the length of `text` - -Additionally, the `get_tokens_unprocessed()` method can be given a -`LexerContext` instead of a string and will then process this context instead of -creating a new one for the string argument. - -Note that because you can set the current position to anything in the callback, -it won't be automatically be set by the caller after the callback is finished. 
-For example, this is how the hypothetical lexer above would be written with the -`ExtendedRegexLexer`:: - - from pygments.lexer import ExtendedRegexLexer - from pygments.token import Generic - - class ExHypotheticLexer(ExtendedRegexLexer): - - def headline_callback(lexer, match, ctx): - equal_signs = match.group(1) - text = match.group(2) - yield match.start(), Generic.Headline, equal_signs + text + equal_signs - ctx.pos = match.end() - - tokens = { - 'root': [ - (r'(=+)(.*?)(\1)', headline_callback) - ] - } - -This might sound confusing (and it can really be). But it is needed, and for an -example look at the Ruby lexer in `ruby.py`_. - -.. _ruby.py: https://github.com/pygments/pygments/blob/master/pygments/lexers/ruby.py - - -Handling Lists of Keywords -========================== - -For a relatively short list (hundreds) you can construct an optimized regular -expression directly using ``words()`` (longer lists, see next section). This -function handles a few things for you automatically, including escaping -metacharacters and Python's first-match rather than longest-match in -alternations. Feel free to put the lists themselves in -``pygments/lexers/_$lang_builtins.py`` (see examples there), and generated by -code if possible. - -An example of using ``words()`` is something like:: - - from pygments.lexer import RegexLexer, words, Name - - class MyLexer(RegexLexer): - - tokens = { - 'root': [ - (words(('else', 'elseif'), suffix=r'\b'), Name.Builtin), - (r'\w+', Name), - ], - } - -As you can see, you can add ``prefix`` and ``suffix`` parts to the constructed -regex. - - -Modifying Token Streams -======================= - -Some languages ship a lot of builtin functions (for example PHP). The total -amount of those functions differs from system to system because not everybody -has every extension installed. In the case of PHP there are over 3000 builtin -functions. That's an incredibly huge amount of functions, much more than you -want to put into a regular expression. - -But because only `Name` tokens can be function names this is solvable by -overriding the ``get_tokens_unprocessed()`` method. The following lexer -subclasses the `PythonLexer` so that it highlights some additional names as -pseudo keywords:: - - from pygments.lexers.python import PythonLexer - from pygments.token import Name, Keyword - - class MyPythonLexer(PythonLexer): - EXTRA_KEYWORDS = set(('foo', 'bar', 'foobar', 'barfoo', 'spam', 'eggs')) - - def get_tokens_unprocessed(self, text): - for index, token, value in PythonLexer.get_tokens_unprocessed(self, text): - if token is Name and value in self.EXTRA_KEYWORDS: - yield index, Keyword.Pseudo, value - else: - yield index, token, value - -The `PhpLexer` and `LuaLexer` use this method to resolve builtin functions. +.. -*- mode: rst -*- + +.. highlight:: python + +==================== +Write your own lexer +==================== + +If a lexer for your favorite language is missing in the Pygments package, you +can easily write your own and extend Pygments. + +All you need can be found inside the :mod:`pygments.lexer` module. As you can +read in the :doc:`API documentation `, a lexer is a class that is +initialized with some keyword arguments (the lexer options) and that provides a +:meth:`.get_tokens_unprocessed()` method which is given a string or unicode +object with the data to lex. + +The :meth:`.get_tokens_unprocessed()` method must return an iterator or iterable +containing tuples in the form ``(index, token, value)``. 
+
+
+RegexLexer
+==========
+
+The lexer base class used by almost all of Pygments' lexers is the
+:class:`RegexLexer`. This class allows you to define lexing rules in terms of
+*regular expressions* for different *states*.
+
+States are groups of regular expressions that are matched against the input
+string at the *current position*. If one of these expressions matches, a
+corresponding action is performed (such as yielding a token with a specific
+type, or changing state), the current position is set to where the last match
+ended and the matching process continues with the first regex of the current
+state.
+
+Lexer states are kept on a stack: each time a new state is entered, the new
+state is pushed onto the stack. The most basic lexers (like the `DiffLexer`)
+just need one state.
+
+Each state is defined as a list of tuples in the form (`regex`, `action`,
+`new_state`) where the last item is optional. In the most basic form, `action`
+is a token type (like `Name.Builtin`). That means: when `regex` matches, emit a
+token with the match text and that token type, and push `new_state` on the
+state stack. If the new state is ``'#pop'``, the topmost state is popped from
+the stack instead. To pop more than one state, use ``'#pop:2'`` and so on.
+``'#push'`` is a synonym for pushing the current state on the stack.
+
+The following example shows the `DiffLexer` from the builtin lexers. Note that
+it contains some additional attributes `name`, `aliases` and `filenames` which
+aren't required for a lexer. They are used by the builtin lexer lookup
+functions. ::
+
+    from pygments.lexer import RegexLexer
+    from pygments.token import *
+
+    class DiffLexer(RegexLexer):
+        name = 'Diff'
+        aliases = ['diff']
+        filenames = ['*.diff']
+
+        tokens = {
+            'root': [
+                (r' .*\n', Text),
+                (r'\+.*\n', Generic.Inserted),
+                (r'-.*\n', Generic.Deleted),
+                (r'@.*\n', Generic.Subheading),
+                (r'Index.*\n', Generic.Heading),
+                (r'=.*\n', Generic.Heading),
+                (r'.*\n', Text),
+            ]
+        }
+
+As you can see this lexer only uses one state. When the lexer starts scanning
+the text, it first checks if the current character is a space. If this is true,
+it scans everything until the end of the line and returns the data as a `Text`
+token (which is the "no special highlighting" token).
+
+If this rule doesn't match, it checks if the current char is a plus sign. And
+so on.
+
+If no rule matches at the current position, the current char is emitted as an
+`Error` token that indicates a lexing error, and the position is increased by
+one.
+
+
+Adding and testing a new lexer
+==============================
+
+The easiest way to use a new lexer is to use Pygments' support for loading
+the lexer from a file relative to your current directory.
+
+First, change the name of your lexer class to CustomLexer:
+
+.. code-block:: python
+
+    from pygments.lexer import RegexLexer
+    from pygments.token import *
+
+    class CustomLexer(RegexLexer):
+        """All your lexer code goes here!"""
+
+Then you can load and test the lexer from the command line with the additional
+flag ``-x``:
+
+.. code-block:: console
+
+    $ python -m pygments -x -l your_lexer_file.py <inputfile>
+
+To specify a class name other than CustomLexer, append it with a colon:
+
+.. code-block:: console
+
+    $ python -m pygments -x -l your_lexer.py:SomeLexer <inputfile>
+
+Use the ``-f`` flag to select a different output format than terminal
+escape sequences.
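+
+For instance, to render a file to HTML with your work-in-progress lexer (the
+file names here are placeholders):
+
+.. code-block:: console
+
+    $ python -m pygments -x -f html -l your_lexer_file.py -o out.html <inputfile>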
+
+Or, using the Python API:
+
+.. code-block:: python
+
+    from pygments.lexers import load_lexer_from_file
+
+    # For a lexer named CustomLexer
+    your_lexer = load_lexer_from_file(filename, **options)
+
+    # For a lexer named MyNewLexer
+    your_named_lexer = load_lexer_from_file(filename, "MyNewLexer", **options)
+
+When loading custom lexers and formatters, be extremely careful to use only
+trusted files; Pygments will perform the equivalent of ``eval`` on them.
+
+If you only want to use your lexer with the Pygments API, you can import and
+instantiate the lexer yourself, then pass it to :func:`pygments.highlight`.
+
+To prepare your new lexer for inclusion in the Pygments distribution, so that it
+will be found when passing filenames or lexer aliases from the command line, you
+have to perform the following steps.
+
+First, change into the directory containing the Pygments source code. You
+will need to have either an unpacked source tarball, or (preferably) a copy
+cloned from GitHub.
+
+.. code-block:: console
+
+    $ cd pygments
+
+Select a matching module under ``pygments/lexers``, or create a new module for
+your lexer class.
+
+.. note::
+
+    We encourage you to put your lexer class into its own module, unless it's a
+    very small derivative of an already existing lexer.
+
+Next, make sure the lexer is known from outside of the module. All modules in
+the ``pygments.lexers`` package specify ``__all__``. For example,
+``esoteric.py`` sets::
+
+    __all__ = ['BrainfuckLexer', 'BefungeLexer', ...]
+
+Add the name of your lexer class to this list (or create the list if your lexer
+is the only class in the module).
+
+Finally, the lexer can be made publicly known by rebuilding the lexer mapping:
+
+.. code-block:: console
+
+    $ make mapfiles
+
+To test the new lexer, store an example file with the proper extension in
+``tests/examplefiles``. For example, to test your ``DiffLexer``, add a
+``tests/examplefiles/example.diff`` containing a sample diff output.
+
+Now you can use ``python -m pygments`` from the root of the checkout to
+render your example to HTML:
+
+.. code-block:: console
+
+    $ python -m pygments -O full -f html -o /tmp/example.html tests/examplefiles/example.diff
+
+Note that this explicitly calls the ``pygments`` module in the current
+directory. This ensures your modifications are used; otherwise a possibly
+already installed, unmodified version without your new lexer would be picked
+up from the system search path (``$PATH``) instead.
+
+To view the result, open ``/tmp/example.html`` in your browser.
+
+Once the example renders as expected, you should run the complete test suite:
+
+.. code-block:: console
+
+    $ make test
+
+It also tests that your lexer fulfills the lexer API and certain invariants,
+such as that the concatenation of all token text is the same as the input text.
+
+
+Regex Flags
+===========
+
+You can either define regex flags locally in the regex (``r'(?x)foo bar'``) or
+globally by adding a `flags` attribute to your lexer class. If no attribute is
+defined, it defaults to `re.MULTILINE`. For more information about regular
+expression flags see the page about `regular expressions`_ in the Python
+documentation.
+
+.. _regular expressions: https://docs.python.org/library/re.html#regular-expression-syntax
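+
+For instance, a lexer that should match its keywords case-insensitively could
+set the attribute like this (a hypothetical ``CaselessLexer``)::
+
+    import re
+
+    from pygments.lexer import RegexLexer
+    from pygments.token import Keyword, Text
+
+    class CaselessLexer(RegexLexer):
+        # applies to every regex in ``tokens``
+        flags = re.MULTILINE | re.IGNORECASE
+
+        tokens = {
+            'root': [
+                (r'begin|end', Keyword),  # also matches BEGIN, End, ...
+                (r'[\s\S]', Text),
+            ],
+        }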
+
+
+Scanning multiple tokens at once
+================================
+
+So far, the `action` element in the rule tuple of regex, action and state has
+been a single token type. Now we look at the first of several other possible
+values.
+
+Here is a more complex lexer that highlights INI files. INI files consist of
+sections, comments and ``key = value`` pairs::
+
+    from pygments.lexer import RegexLexer, bygroups
+    from pygments.token import *
+
+    class IniLexer(RegexLexer):
+        name = 'INI'
+        aliases = ['ini', 'cfg']
+        filenames = ['*.ini', '*.cfg']
+
+        tokens = {
+            'root': [
+                (r'\s+', Text),
+                (r';.*?$', Comment),
+                (r'\[.*?\]$', Keyword),
+                (r'(.*?)(\s*)(=)(\s*)(.*?)$',
+                 bygroups(Name.Attribute, Text, Operator, Text, String))
+            ]
+        }
+
+The lexer first looks for whitespace, comments and section names. Later it
+looks for a line that looks like a key/value pair, separated by an ``'='``
+sign, and optional whitespace.
+
+The `bygroups` helper yields each capturing group in the regex with a different
+token type. First the `Name.Attribute` token, then a `Text` token for the
+optional whitespace, after that an `Operator` token for the equals sign. Then a
+`Text` token for the whitespace again. The rest of the line is returned as
+`String`.
+
+Note that for this to work, every part of the match must be inside a capturing
+group (a ``(...)``), and there must not be any nested capturing groups. If you
+nevertheless need a group, use a non-capturing group defined using this syntax:
+``(?:some|words|here)`` (note the ``?:`` after the beginning parenthesis).
+
+If you find yourself needing a capturing group inside the regex which shouldn't
+be part of the output but is used in the regular expressions for backreferencing
+(e.g. ``r'(<(foo|bar)>)(.*?)(</\2>)'``), you can pass `None` to the bygroups
+function and that group will be skipped in the output.
+
+
+Changing states
+===============
+
+Many lexers need multiple states to work as expected. For example, some
+languages allow multiline comments to be nested. Since this is a recursive
+pattern it's impossible to lex just using regular expressions.
+
+Here is a lexer that recognizes C++ style comments (multi-line with ``/* */``
+and single-line with ``//`` until end of line)::
+
+    from pygments.lexer import RegexLexer
+    from pygments.token import *
+
+    class CppCommentLexer(RegexLexer):
+        name = 'Example Lexer with states'
+
+        tokens = {
+            'root': [
+                (r'[^/]+', Text),
+                (r'/\*', Comment.Multiline, 'comment'),
+                (r'//.*?$', Comment.Singleline),
+                (r'/', Text)
+            ],
+            'comment': [
+                (r'[^*/]', Comment.Multiline),
+                (r'/\*', Comment.Multiline, '#push'),
+                (r'\*/', Comment.Multiline, '#pop'),
+                (r'[*/]', Comment.Multiline)
+            ]
+        }
+
+This lexer starts lexing in the ``'root'`` state. It tries to match as much as
+possible until it finds a slash (``'/'``). If the next character after the slash
+is an asterisk (``'*'``) the `RegexLexer` sends those two characters to the
+output stream marked as `Comment.Multiline` and continues lexing with the rules
+defined in the ``'comment'`` state.
+
+If there wasn't an asterisk after the slash, the `RegexLexer` checks if it's a
+single-line comment (i.e. the slash is followed by a second slash). If this
+also wasn't the case it must be a single slash, which is not a comment starter
+(the separate regex for a single slash must also be given, else the slash would
+be marked as an error token).
+
+Inside the ``'comment'`` state, we do the same thing again. Scan until the
+lexer finds a star or slash. If it's the opening of a multiline comment, push
+the ``'comment'`` state on the stack and continue scanning, again in the
+``'comment'`` state. Else, check if it's the end of the multiline comment. If
+yes, pop one state from the stack.
+
+Note: If you pop from an empty stack you'll get an `IndexError`. (There is an
+easy way to prevent this from happening: don't ``'#pop'`` in the root state).
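+
+To see the stack in action, feed the lexer above a nested comment -- a sketch
+you can run once ``CppCommentLexer`` is defined::
+
+    code = '/* outer /* inner */ still outer */ rest'
+    for index, token, value in CppCommentLexer().get_tokens_unprocessed(code):
+        print(index, token, repr(value))
+
+Everything up to the final ``*/`` is reported as `Comment.Multiline`: the inner
+``/*`` pushes ``'comment'`` a second time, so the first ``*/`` only pops back
+into the outer comment. Only ``' rest'`` comes out as `Text`.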
+
+If the `RegexLexer` encounters a newline that is flagged as an error token, the
+stack is emptied and the lexer continues scanning in the ``'root'`` state. This
+can help produce error-tolerant highlighting for erroneous input, e.g. when a
+single-line string is not closed.
+
+
+Advanced state tricks
+=====================
+
+There are a few more things you can do with states:
+
+- You can push multiple states onto the stack if you give a tuple instead of a
+  simple string as the third item in a rule tuple. For example, if you want to
+  match a comment containing a directive, something like:
+
+  .. code-block:: text
+
+      /* <processing directive>    rest of comment */
+
+  you can use this rule::
+
+      tokens = {
+          'root': [
+              (r'/\* <', Comment, ('comment', 'directive')),
+              ...
+          ],
+          'directive': [
+              (r'[^>]*', Comment.Directive),
+              (r'>', Comment, '#pop'),
+          ],
+          'comment': [
+              (r'[^*]+', Comment),
+              (r'\*/', Comment, '#pop'),
+              (r'\*', Comment),
+          ]
+      }
+
+  When this encounters the above sample, first ``'comment'`` and ``'directive'``
+  are pushed onto the stack, then the lexer continues in the directive state
+  until it finds the closing ``>``, then it continues in the comment state until
+  the closing ``*/``. Then, both states are popped from the stack again and
+  lexing continues in the root state.
+
+  .. versionadded:: 0.9
+     The tuple can contain the special ``'#push'`` and ``'#pop'`` (but not
+     ``'#pop:n'``) directives.
+
+
+- You can include the rules of a state in the definition of another. This is
+  done by using `include` from `pygments.lexer`::
+
+    from pygments.lexer import RegexLexer, bygroups, include
+    from pygments.token import *
+
+    class ExampleLexer(RegexLexer):
+        tokens = {
+            'comments': [
+                (r'/\*.*?\*/', Comment),
+                (r'//.*?\n', Comment),
+            ],
+            'root': [
+                include('comments'),
+                (r'(function )(\w+)( {)',
+                 bygroups(Keyword, Name, Keyword), 'function'),
+                (r'.', Text),
+            ],
+            'function': [
+                (r'[^}/]+', Text),
+                include('comments'),
+                (r'/', Text),
+                (r'\}', Keyword, '#pop'),
+            ]
+        }
+
+  This is a hypothetical lexer for a language that consists of functions and
+  comments. Because comments can occur at the top level and in functions, we
+  need rules for comments in both states. As you can see, the `include` helper
+  saves repeating rules that occur more than once (in this example, the state
+  ``'comments'`` will never be entered by the lexer, as it's only there to be
+  included in ``'root'`` and ``'function'``).
+
+- Sometimes, you may want to "combine" a state from existing ones. This is
+  possible with the `combined` helper from `pygments.lexer`.
+
+  If you, instead of a new state, write ``combined('state1', 'state2')`` as the
+  third item of a rule tuple, a new anonymous state will be formed from state1
+  and state2 and if the rule matches, the lexer will enter this state.
+
+  This is not used very often, but can be helpful in some cases, such as the
+  `PythonLexer`'s string literal processing.
+
+- If you want your lexer to start lexing in a different state you can modify the
+  stack by overriding the `get_tokens_unprocessed()` method::
+
+      from pygments.lexer import RegexLexer
+
+      class ExampleLexer(RegexLexer):
+          tokens = {...}
+
+          def get_tokens_unprocessed(self, text, stack=('root', 'otherstate')):
+              for item in RegexLexer.get_tokens_unprocessed(self, text, stack):
+                  yield item
+
+  Some lexers like the `PhpLexer` use this to make the leading ``<?php``
+  preprocessor comments optional. Note that you can crash the lexer easily by
+  putting values into the stack that don't exist in the token map. Also
+  removing ``'root'`` from the stack can result in strange errors!
+
+- In some lexers, a state should be popped if anything is encountered that isn't
+  matched by a rule in the state. You could use an empty regex at the end of the
+  state list, but Pygments provides a more obvious way of spelling that:
+  ``default('#pop')`` is equivalent to ``('', Text, '#pop')``.
+
+  .. versionadded:: 2.0
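+
+For instance, ``default`` is handy for leaving a state when none of its rules
+match any more -- a hypothetical string state::
+
+    from pygments.lexer import RegexLexer, default
+    from pygments.token import *
+
+    class StringLexer(RegexLexer):
+        tokens = {
+            'root': [
+                (r'"', String, 'string'),
+                (r'[^"]+', Text),
+            ],
+            'string': [
+                (r'"', String, '#pop'),
+                (r'\\.', String.Escape),
+                (r'[^"\\]+', String),
+                # anything unmatched (e.g. a lone trailing backslash) returns
+                # to 'root' instead of producing an Error token
+                default('#pop'),
+            ],
+        }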
+
+
+Subclassing lexers derived from RegexLexer
+==========================================
+
+.. versionadded:: 1.6
+
+Sometimes multiple languages are very similar, but should still be lexed by
+different lexer classes.
+
+When subclassing a lexer derived from RegexLexer, the ``tokens`` dictionaries
+defined in the parent and child class are merged. For example::
+
+    from pygments.lexer import RegexLexer, inherit
+    from pygments.token import *
+
+    class BaseLexer(RegexLexer):
+        tokens = {
+            'basestate': [
+                (r'[a-z]+', Name),
+                (r'/\*', Comment, 'comment'),
+                (r'"', String, 'string'),
+            ],
+        }
+
+    class DerivedLexer(BaseLexer):
+        tokens = {
+            'integers': [
+                (r'0x[0-9a-f]+', Number.Hex),
+                (r'[0-9]+', Number.Integer),
+            ],
+            'basestate': [
+                inherit,
+                (r'[A-Z]+', Name),
+            ],
+        }
+
+The `DerivedLexer` adds an ``'integers'`` state and extends the ``'basestate'``
+state. The `inherit` keyword marks the position at which the parent class's
+rules for that state are inserted; without it, the child's definition would
+completely replace the parent's.
+
+
+Using multiple lexers
+=====================
+
+Using multiple lexers for the same input can be tricky. One of the easiest
+combination techniques is shown here: You can replace the action entry in a rule
+tuple with a lexer class. The matched text will then be lexed with that lexer,
+and the resulting tokens will be yielded.
+
+For example, here is a stripped-down HTML lexer that delegates embedded
+JavaScript::
+
+    from pygments.lexer import RegexLexer, bygroups, using
+    from pygments.lexers.javascript import JavascriptLexer
+    from pygments.token import *
+
+    class ExampleLexer(RegexLexer):
+        tokens = {
+            'root': [
+                (r'[^<&]+', Text),
+                (r'&.*?;', Name.Entity),
+                (r'<\s*script\s*', Name.Tag, ('script-content', 'tag')),
+                (r'<\s*[a-zA-Z0-9:]+', Name.Tag, 'tag'),
+                (r'<\s*/\s*[a-zA-Z0-9:]+\s*>', Name.Tag),
+            ],
+            'script-content': [
+                (r'(.+?)(<\s*/\s*script\s*>)',
+                 bygroups(using(JavascriptLexer), Name.Tag),
+                 '#pop'),
+            ]
+        }
+
+Here the content of a ``<script>`` tag up to the ``</script>`` end tag is
+processed by the `JavascriptLexer`, while the end tag is yielded as a normal
+token with the `Name.Tag` type.
+
+Also note the ``(r'<\s*script\s*', Name.Tag, ('script-content', 'tag'))`` rule.
+Here, two states are pushed onto the state stack, ``'script-content'`` and
+``'tag'``. That means that first ``'tag'`` is processed, which will lex
+attributes and the closing ``>``, then the ``'tag'`` state is popped and the
+next state on top of the stack will be ``'script-content'``.
+
+Since you cannot refer to the class currently being defined, use `this`
+(imported from `pygments.lexer`) to refer to the current lexer class, i.e.
+``using(this)``. This construct may seem unnecessary, but this is often the
+most obvious way of lexing arbitrary syntax between fixed delimiters without
+introducing deeply nested states.
+
+The `using()` helper has a special keyword argument, `state`, which works as
+follows: if given, the lexer to use initially is not in the ``"root"`` state,
+but in the state given by this argument. This does not work with advanced
+`RegexLexer` subclasses such as `ExtendedRegexLexer` (see below).
+
+Any other keyword arguments passed to `using()` are added to the keyword
+arguments used to create the lexer.
+
+
+Delegating Lexer
+================
+
+Another approach for nested lexers is the `DelegatingLexer` which is for example
+used for the template engine lexers. It takes two lexers as arguments on
+initialisation: a `root_lexer` and a `language_lexer`.
+
+The input is processed as follows: First, the whole text is lexed with the
+`language_lexer`. All tokens yielded with the special type of ``Other`` are
+then concatenated and given to the `root_lexer`. The language tokens of the
+`language_lexer` are then inserted into the `root_lexer`'s token stream at the
+appropriate positions. ::
+
+    from pygments.lexer import DelegatingLexer
+    from pygments.lexers.web import HtmlLexer, PhpLexer
+
+    class HtmlPhpLexer(DelegatingLexer):
+        def __init__(self, **options):
+            super().__init__(HtmlLexer, PhpLexer, **options)
+
+This procedure ensures that e.g. HTML with template tags in it is highlighted
+correctly even if the template tags are put into HTML tags or attributes.
+
+If you want to change the needle token ``Other`` to something else, you can give
+the lexer another token type as the third parameter::
+
+    DelegatingLexer.__init__(MyLexer, OtherLexer, Text, **options)
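+
+A quick way to check the combined behavior -- a sketch using the class above::
+
+    from pygments import highlight
+    from pygments.formatters import HtmlFormatter
+
+    code = '<p><?php echo "hi"; ?></p>'
+    print(highlight(code, HtmlPhpLexer(), HtmlFormatter()))
+
+The PHP code between ``<?php`` and ``?>`` is lexed by the `PhpLexer`, while the
+surrounding markup is emitted as ``Other`` and then re-lexed as HTML by the
+`HtmlLexer`.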
+
+
+Callbacks
+=========
+
+Sometimes the grammar of a language is so complex that a lexer would be unable
+to process it just by using regular expressions and stacks.
+
+For this, the `RegexLexer` allows callbacks to be given in rule tuples, instead
+of token types (`bygroups` and `using` are nothing else but preimplemented
+callbacks). The callback must be a function taking two arguments:
+
+* the lexer itself
+* the match object for the last matched rule
+
+The callback must then return an iterable of (or simply yield) ``(index,
+tokentype, value)`` tuples, which are then just passed through by
+`get_tokens_unprocessed()`. The ``index`` here is the position of the token in
+the input string, ``tokentype`` is the normal token type (like `Name.Builtin`),
+and ``value`` the associated part of the input string.
+
+You can see an example here::
+
+    from pygments.lexer import RegexLexer
+    from pygments.token import Generic
+
+    class HypotheticLexer(RegexLexer):
+
+        def headline_callback(lexer, match):
+            equal_signs = match.group(1)
+            text = match.group(2)
+            yield match.start(), Generic.Headline, equal_signs + text + equal_signs
+
+        tokens = {
+            'root': [
+                (r'(=+)(.*?)(\1)', headline_callback)
+            ]
+        }
+
+If the regex for the `headline_callback` matches, the function is called with
+the match object. Note that after the callback is done, processing continues
+normally, that is, after the end of the previous match. The callback has no
+possibility to influence the position.
+
+There are not really any simple examples for lexer callbacks, but you can see
+them in action e.g. in the `SMLLexer` class in `ml.py`_.
+
+.. _ml.py: https://github.com/pygments/pygments/blob/master/pygments/lexers/ml.py
+
+
+The ExtendedRegexLexer class
+============================
+
+The `RegexLexer`, even with callbacks, unfortunately isn't powerful enough for
+the funky syntax rules of languages such as Ruby.
+
+But fear not; even then you don't have to abandon the regular expression
+approach: Pygments has a subclass of `RegexLexer`, the `ExtendedRegexLexer`.
+All features known from RegexLexers are available here too, and the tokens are
+specified in exactly the same way, *except* for one detail:
+
+The `get_tokens_unprocessed()` method holds its internal state data not as local
+variables, but in an instance of the `pygments.lexer.LexerContext` class, and
+that instance is passed to callbacks as a third argument. This means that you
+can modify the lexer state in callbacks.
+
+The `LexerContext` class has the following members:
+
+* `text` -- the input text
+* `pos` -- the current starting position that is used for matching regexes
+* `stack` -- a list containing the state stack
+* `end` -- the maximum position to which regexes are matched; this defaults to
+  the length of `text`
+
+Additionally, the `get_tokens_unprocessed()` method can be given a
+`LexerContext` instead of a string and will then process this context instead of
+creating a new one for the string argument.
+
+Note that because you can set the current position to anything in the callback,
+it won't automatically be set by the caller after the callback is finished.
+For example, this is how the hypothetical lexer above would be written with the
+`ExtendedRegexLexer`::
+
+    from pygments.lexer import ExtendedRegexLexer
+    from pygments.token import Generic
+
+    class ExHypotheticLexer(ExtendedRegexLexer):
+
+        def headline_callback(lexer, match, ctx):
+            equal_signs = match.group(1)
+            text = match.group(2)
+            yield match.start(), Generic.Headline, equal_signs + text + equal_signs
+            ctx.pos = match.end()
+
+        tokens = {
+            'root': [
+                (r'(=+)(.*?)(\1)', headline_callback)
+            ]
+        }
+
+This might sound confusing (and it really can be). But it is needed, and for an
+example look at the Ruby lexer in `ruby.py`_.
+
+.. _ruby.py: https://github.com/pygments/pygments/blob/master/pygments/lexers/ruby.py
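+
+As a sketch of the context object in use: you can also drive
+`get_tokens_unprocessed()` with a `LexerContext` of your own, e.g. to resume
+lexing at an arbitrary offset::
+
+    from pygments.lexer import LexerContext
+
+    lexer = ExHypotheticLexer()
+    ctx = LexerContext('intro == Headline ==', pos=6)  # start after 'intro '
+    tokens = list(lexer.get_tokens_unprocessed(context=ctx))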
+
+
+Handling Lists of Keywords
+==========================
+
+For a relatively short list (hundreds) you can construct an optimized regular
+expression directly using ``words()`` (longer lists, see next section). This
+function handles a few things for you automatically, including escaping
+metacharacters and working around Python's first-match (rather than
+longest-match) behavior with alternations. Feel free to put the lists
+themselves in ``pygments/lexers/_$lang_builtins.py`` (see examples there),
+preferably generated by code.
+
+An example of using ``words()`` is something like::
+
+    from pygments.lexer import RegexLexer, words, Name
+
+    class MyLexer(RegexLexer):
+
+        tokens = {
+            'root': [
+                (words(('else', 'elseif'), suffix=r'\b'), Name.Builtin),
+                (r'\w+', Name),
+            ],
+        }
+
+As you can see, you can add ``prefix`` and ``suffix`` parts to the constructed
+regex.
+
+
+Modifying Token Streams
+=======================
+
+Some languages ship a lot of builtin functions (for example PHP). The total
+number of those functions differs from system to system because not everybody
+has every extension installed. In the case of PHP there are over 3000 builtin
+functions -- far more than you want to put into a regular expression.
+
+But because only `Name` tokens can be function names this is solvable by
+overriding the ``get_tokens_unprocessed()`` method. The following lexer
+subclasses the `PythonLexer` so that it highlights some additional names as
+pseudo keywords::
+
+    from pygments.lexers.python import PythonLexer
+    from pygments.token import Name, Keyword
+
+    class MyPythonLexer(PythonLexer):
+        EXTRA_KEYWORDS = set(('foo', 'bar', 'foobar', 'barfoo', 'spam', 'eggs'))
+
+        def get_tokens_unprocessed(self, text):
+            for index, token, value in PythonLexer.get_tokens_unprocessed(self, text):
+                if token is Name and value in self.EXTRA_KEYWORDS:
+                    yield index, Keyword.Pseudo, value
+                else:
+                    yield index, token, value
+
+The `PhpLexer` and `LuaLexer` use this method to resolve builtin functions.
diff --git a/doc/docs/lexers.rst b/doc/docs/lexers.rst
old mode 100644
new mode 100755
index ef40f14..86a59c4
--- a/doc/docs/lexers.rst
+++ b/doc/docs/lexers.rst
@@ -1,69 +1,69 @@
-.. -*- mode: rst -*-
-
-================
-Available lexers
-================
-
-This page lists all available builtin lexers and the options they take.
-
-Currently, **all lexers** support these options:
-
-`stripnl`
-    Strip leading and trailing newlines from the input (default: ``True``)
-
-`stripall`
-    Strip all leading and trailing whitespace from the input (default:
-    ``False``).
-
-`ensurenl`
-    Make sure that the input ends with a newline (default: ``True``). This
-    is required for some lexers that consume input linewise.
-
-    .. versionadded:: 1.3
-
-`tabsize`
-    If given and greater than 0, expand tabs in the input (default: ``0``).
-
-`encoding`
-    If given, must be an encoding name (such as ``"utf-8"``). This encoding
-    will be used to convert the input string to Unicode (if it is not already
-    a Unicode string). The default is ``"guess"``.
-
-    If this option is set to ``"guess"``, a simple UTF-8 vs. Latin-1
-    detection is used, if it is set to ``"chardet"``, the
-    `chardet library `_ is used to
-    guess the encoding of the input.
-
-    .. versionadded:: 0.6
-
-
-The "Short Names" field lists the identifiers that can be used with the
-`get_lexer_by_name()` function.
- -These lexers are builtin and can be imported from `pygments.lexers`: - -.. pygmentsdoc:: lexers - - -Iterating over all lexers -------------------------- - -.. versionadded:: 0.6 - -To get all lexers (both the builtin and the plugin ones), you can -use the `get_all_lexers()` function from the `pygments.lexers` -module: - -.. sourcecode:: pycon - - >>> from pygments.lexers import get_all_lexers - >>> i = get_all_lexers() - >>> i.next() - ('Diff', ('diff',), ('*.diff', '*.patch'), ('text/x-diff', 'text/x-patch')) - >>> i.next() - ('Delphi', ('delphi', 'objectpascal', 'pas', 'pascal'), ('*.pas',), ('text/x-pascal',)) - >>> i.next() - ('XML+Ruby', ('xml+erb', 'xml+ruby'), (), ()) - -As you can see, the return value is an iterator which yields tuples -in the form ``(name, aliases, filetypes, mimetypes)``. +.. -*- mode: rst -*- + +================ +Available lexers +================ + +This page lists all available builtin lexers and the options they take. + +Currently, **all lexers** support these options: + +`stripnl` + Strip leading and trailing newlines from the input (default: ``True``) + +`stripall` + Strip all leading and trailing whitespace from the input (default: + ``False``). + +`ensurenl` + Make sure that the input ends with a newline (default: ``True``). This + is required for some lexers that consume input linewise. + + .. versionadded:: 1.3 + +`tabsize` + If given and greater than 0, expand tabs in the input (default: ``0``). + +`encoding` + If given, must be an encoding name (such as ``"utf-8"``). This encoding + will be used to convert the input string to Unicode (if it is not already + a Unicode string). The default is ``"guess"``. + + If this option is set to ``"guess"``, a simple UTF-8 vs. Latin-1 + detection is used, if it is set to ``"chardet"``, the + `chardet library `_ is used to + guess the encoding of the input. + + .. versionadded:: 0.6 + + +The "Short Names" field lists the identifiers that can be used with the +`get_lexer_by_name()` function. + +These lexers are builtin and can be imported from `pygments.lexers`: + +.. pygmentsdoc:: lexers + + +Iterating over all lexers +------------------------- + +.. versionadded:: 0.6 + +To get all lexers (both the builtin and the plugin ones), you can +use the `get_all_lexers()` function from the `pygments.lexers` +module: + +.. sourcecode:: pycon + + >>> from pygments.lexers import get_all_lexers + >>> i = get_all_lexers() + >>> i.next() + ('Diff', ('diff',), ('*.diff', '*.patch'), ('text/x-diff', 'text/x-patch')) + >>> i.next() + ('Delphi', ('delphi', 'objectpascal', 'pas', 'pascal'), ('*.pas',), ('text/x-pascal',)) + >>> i.next() + ('XML+Ruby', ('xml+erb', 'xml+ruby'), (), ()) + +As you can see, the return value is an iterator which yields tuples +in the form ``(name, aliases, filetypes, mimetypes)``. diff --git a/doc/docs/moinmoin.rst b/doc/docs/moinmoin.rst old mode 100644 new mode 100755 index 80ed25c..d282a87 --- a/doc/docs/moinmoin.rst +++ b/doc/docs/moinmoin.rst @@ -1,39 +1,39 @@ -.. -*- mode: rst -*- - -============================ -Using Pygments with MoinMoin -============================ - -From Pygments 0.7, the source distribution ships a `Moin`_ parser plugin that -can be used to get Pygments highlighting in Moin wiki pages. - -To use it, copy the file `external/moin-parser.py` from the Pygments -distribution to the `data/plugin/parser` subdirectory of your Moin instance. 
-Edit the options at the top of the file (currently ``ATTACHMENTS`` and -``INLINESTYLES``) and rename the file to the name that the parser directive -should have. For example, if you name the file ``code.py``, you can get a -highlighted Python code sample with this Wiki markup:: - - {{{ - #!code python - [...] - }}} - -where ``python`` is the Pygments name of the lexer to use. - -Additionally, if you set the ``ATTACHMENTS`` option to True, Pygments will also -be called for all attachments for whose filenames there is no other parser -registered. - -You are responsible for including CSS rules that will map the Pygments CSS -classes to colors. You can output a stylesheet file with `pygmentize`, put it -into the `htdocs` directory of your Moin instance and then include it in the -`stylesheets` configuration option in the Moin config, e.g.:: - - stylesheets = [('screen', '/htdocs/pygments.css')] - -If you do not want to do that and are willing to accept larger HTML output, you -can set the ``INLINESTYLES`` option to True. - - -.. _Moin: https://moinmo.in/ +.. -*- mode: rst -*- + +============================ +Using Pygments with MoinMoin +============================ + +From Pygments 0.7, the source distribution ships a `Moin`_ parser plugin that +can be used to get Pygments highlighting in Moin wiki pages. + +To use it, copy the file `external/moin-parser.py` from the Pygments +distribution to the `data/plugin/parser` subdirectory of your Moin instance. +Edit the options at the top of the file (currently ``ATTACHMENTS`` and +``INLINESTYLES``) and rename the file to the name that the parser directive +should have. For example, if you name the file ``code.py``, you can get a +highlighted Python code sample with this Wiki markup:: + + {{{ + #!code python + [...] + }}} + +where ``python`` is the Pygments name of the lexer to use. + +Additionally, if you set the ``ATTACHMENTS`` option to True, Pygments will also +be called for all attachments for whose filenames there is no other parser +registered. + +You are responsible for including CSS rules that will map the Pygments CSS +classes to colors. You can output a stylesheet file with `pygmentize`, put it +into the `htdocs` directory of your Moin instance and then include it in the +`stylesheets` configuration option in the Moin config, e.g.:: + + stylesheets = [('screen', '/htdocs/pygments.css')] + +If you do not want to do that and are willing to accept larger HTML output, you +can set the ``INLINESTYLES`` option to True. + + +.. _Moin: https://moinmo.in/ diff --git a/doc/docs/plugins.rst b/doc/docs/plugins.rst old mode 100644 new mode 100755 index 1008013..4d37c9a --- a/doc/docs/plugins.rst +++ b/doc/docs/plugins.rst @@ -1,93 +1,93 @@ -================ -Register Plugins -================ - -If you want to extend Pygments without hacking the sources, but want to -use the lexer/formatter/style/filter lookup functions (`lexers.get_lexer_by_name` -et al.), you can use `setuptools`_ entrypoints to add new lexers, formatters -or styles as if they were in the Pygments core. - -.. _setuptools: https://pypi.org/project/setuptools/ - -That means you can use your highlighter modules with the `pygmentize` script, -which relies on the mentioned functions. - - -Entrypoints -=========== - -Here is a list of setuptools entrypoints that Pygments understands: - -`pygments.lexers` - - This entrypoint is used for adding new lexers to the Pygments core. 
- The name of the entrypoint values doesn't really matter, Pygments extracts - required metadata from the class definition: - - .. sourcecode:: ini - - [pygments.lexers] - yourlexer = yourmodule:YourLexer - - Note that you have to define ``name``, ``aliases`` and ``filename`` - attributes so that you can use the highlighter from the command line: - - .. sourcecode:: python - - class YourLexer(...): - name = 'Name Of Your Lexer' - aliases = ['alias'] - filenames = ['*.ext'] - - -`pygments.formatters` - - You can use this entrypoint to add new formatters to Pygments. The - name of an entrypoint item is the name of the formatter. If you - prefix the name with a slash it's used as a filename pattern: - - .. sourcecode:: ini - - [pygments.formatters] - yourformatter = yourmodule:YourFormatter - /.ext = yourmodule:YourFormatter - - -`pygments.styles` - - To add a new style you can use this entrypoint. The name of the entrypoint - is the name of the style: - - .. sourcecode:: ini - - [pygments.styles] - yourstyle = yourmodule:YourStyle - - -`pygments.filters` - - Use this entrypoint to register a new filter. The name of the - entrypoint is the name of the filter: - - .. sourcecode:: ini - - [pygments.filters] - yourfilter = yourmodule:YourFilter - - -How To Use Entrypoints -====================== - -This documentation doesn't explain how to use those entrypoints because this is -covered in the `setuptools documentation`_. That page should cover everything -you need to write a plugin. - -.. _setuptools documentation: https://setuptools.readthedocs.io/en/latest/ - - -Extending The Core -================== - -If you have written a Pygments plugin that is open source, please inform us -about that. There is a high chance that we'll add it to the Pygments -distribution. +================ +Register Plugins +================ + +If you want to extend Pygments without hacking the sources, but want to +use the lexer/formatter/style/filter lookup functions (`lexers.get_lexer_by_name` +et al.), you can use `setuptools`_ entrypoints to add new lexers, formatters +or styles as if they were in the Pygments core. + +.. _setuptools: https://pypi.org/project/setuptools/ + +That means you can use your highlighter modules with the `pygmentize` script, +which relies on the mentioned functions. + + +Entrypoints +=========== + +Here is a list of setuptools entrypoints that Pygments understands: + +`pygments.lexers` + + This entrypoint is used for adding new lexers to the Pygments core. + The name of the entrypoint values doesn't really matter, Pygments extracts + required metadata from the class definition: + + .. sourcecode:: ini + + [pygments.lexers] + yourlexer = yourmodule:YourLexer + + Note that you have to define ``name``, ``aliases`` and ``filename`` + attributes so that you can use the highlighter from the command line: + + .. sourcecode:: python + + class YourLexer(...): + name = 'Name Of Your Lexer' + aliases = ['alias'] + filenames = ['*.ext'] + + +`pygments.formatters` + + You can use this entrypoint to add new formatters to Pygments. The + name of an entrypoint item is the name of the formatter. If you + prefix the name with a slash it's used as a filename pattern: + + .. sourcecode:: ini + + [pygments.formatters] + yourformatter = yourmodule:YourFormatter + /.ext = yourmodule:YourFormatter + + +`pygments.styles` + + To add a new style you can use this entrypoint. The name of the entrypoint + is the name of the style: + + .. 
sourcecode:: ini + + [pygments.styles] + yourstyle = yourmodule:YourStyle + + +`pygments.filters` + + Use this entrypoint to register a new filter. The name of the + entrypoint is the name of the filter: + + .. sourcecode:: ini + + [pygments.filters] + yourfilter = yourmodule:YourFilter + + +How To Use Entrypoints +====================== + +This documentation doesn't explain how to use those entrypoints because this is +covered in the `setuptools documentation`_. That page should cover everything +you need to write a plugin. + +.. _setuptools documentation: https://setuptools.readthedocs.io/en/latest/ + + +Extending The Core +================== + +If you have written a Pygments plugin that is open source, please inform us +about that. There is a high chance that we'll add it to the Pygments +distribution. diff --git a/doc/docs/quickstart.rst b/doc/docs/quickstart.rst old mode 100644 new mode 100755 index 3a823e7..3efb3c6 --- a/doc/docs/quickstart.rst +++ b/doc/docs/quickstart.rst @@ -1,205 +1,205 @@ -.. -*- mode: rst -*- - -=========================== -Introduction and Quickstart -=========================== - - -Welcome to Pygments! This document explains the basic concepts and terms and -gives a few examples of how to use the library. - - -Architecture -============ - -There are four types of components that work together highlighting a piece of -code: - -* A **lexer** splits the source into tokens, fragments of the source that - have a token type that determines what the text represents semantically - (e.g., keyword, string, or comment). There is a lexer for every language - or markup format that Pygments supports. -* The token stream can be piped through **filters**, which usually modify - the token types or text fragments, e.g. uppercasing all keywords. -* A **formatter** then takes the token stream and writes it to an output - file, in a format such as HTML, LaTeX or RTF. -* While writing the output, a **style** determines how to highlight all the - different token types. It maps them to attributes like "red and bold". - - -Example -======= - -Here is a small example for highlighting Python code: - -.. sourcecode:: python - - from pygments import highlight - from pygments.lexers import PythonLexer - from pygments.formatters import HtmlFormatter - - code = 'print "Hello World"' - print(highlight(code, PythonLexer(), HtmlFormatter())) - -which prints something like this: - -.. sourcecode:: html - -
-
print "Hello World"
-
- -As you can see, Pygments uses CSS classes (by default, but you can change that) -instead of inline styles in order to avoid outputting redundant style information over -and over. A CSS stylesheet that contains all CSS classes possibly used in the output -can be produced by: - -.. sourcecode:: python - - print(HtmlFormatter().get_style_defs('.highlight')) - -The argument to :func:`get_style_defs` is used as an additional CSS selector: -the output may look like this: - -.. sourcecode:: css - - .highlight .k { color: #AA22FF; font-weight: bold } - .highlight .s { color: #BB4444 } - ... - - -Options -======= - -The :func:`highlight()` function supports a fourth argument called *outfile*, it -must be a file object if given. The formatted output will then be written to -this file instead of being returned as a string. - -Lexers and formatters both support options. They are given to them as keyword -arguments either to the class or to the lookup method: - -.. sourcecode:: python - - from pygments import highlight - from pygments.lexers import get_lexer_by_name - from pygments.formatters import HtmlFormatter - - lexer = get_lexer_by_name("python", stripall=True) - formatter = HtmlFormatter(linenos=True, cssclass="source") - result = highlight(code, lexer, formatter) - -This makes the lexer strip all leading and trailing whitespace from the input -(`stripall` option), lets the formatter output line numbers (`linenos` option), -and sets the wrapping ``
``'s class to ``source`` (instead of -``highlight``). - -Important options include: - -`encoding` : for lexers and formatters - Since Pygments uses Unicode strings internally, this determines which - encoding will be used to convert to or from byte strings. -`style` : for formatters - The name of the style to use when writing the output. - - -For an overview of builtin lexers and formatters and their options, visit the -:doc:`lexer ` and :doc:`formatters ` lists. - -For a documentation on filters, see :doc:`this page `. - - -Lexer and formatter lookup -========================== - -If you want to lookup a built-in lexer by its alias or a filename, you can use -one of the following methods: - -.. sourcecode:: pycon - - >>> from pygments.lexers import (get_lexer_by_name, - ... get_lexer_for_filename, get_lexer_for_mimetype) - - >>> get_lexer_by_name('python') - - - >>> get_lexer_for_filename('spam.rb') - - - >>> get_lexer_for_mimetype('text/x-perl') - - -All these functions accept keyword arguments; they will be passed to the lexer -as options. - -A similar API is available for formatters: use :func:`.get_formatter_by_name()` -and :func:`.get_formatter_for_filename()` from the :mod:`pygments.formatters` -module for this purpose. - - -Guessing lexers -=============== - -If you don't know the content of the file, or you want to highlight a file -whose extension is ambiguous, such as ``.html`` (which could contain plain HTML -or some template tags), use these functions: - -.. sourcecode:: pycon - - >>> from pygments.lexers import guess_lexer, guess_lexer_for_filename - - >>> guess_lexer('#!/usr/bin/python\nprint "Hello World!"') - - - >>> guess_lexer_for_filename('test.py', 'print "Hello World!"') - - -:func:`.guess_lexer()` passes the given content to the lexer classes' -:meth:`analyse_text()` method and returns the one for which it returns the -highest number. - -All lexers have two different filename pattern lists: the primary and the -secondary one. The :func:`.get_lexer_for_filename()` function only uses the -primary list, whose entries are supposed to be unique among all lexers. -:func:`.guess_lexer_for_filename()`, however, will first loop through all lexers -and look at the primary and secondary filename patterns if the filename matches. -If only one lexer matches, it is returned, else the guessing mechanism of -:func:`.guess_lexer()` is used with the matching lexers. - -As usual, keyword arguments to these functions are given to the created lexer -as options. - - -Command line usage -================== - -You can use Pygments from the command line, using the :program:`pygmentize` -script:: - - $ pygmentize test.py - -will highlight the Python file test.py using ANSI escape sequences -(a.k.a. terminal colors) and print the result to standard output. - -To output HTML, use the ``-f`` option:: - - $ pygmentize -f html -o test.html test.py - -to write an HTML-highlighted version of test.py to the file test.html. -Note that it will only be a snippet of HTML, if you want a full HTML document, -use the "full" option:: - - $ pygmentize -f html -O full -o test.html test.py - -This will produce a full HTML document with included stylesheet. - -A style can be selected with ``-O style=``. - -If you need a stylesheet for an existing HTML file using Pygments CSS classes, -it can be created with:: - - $ pygmentize -S default -f html > style.css - -where ``default`` is the style name. - -More options and tricks and be found in the :doc:`command line reference -`. +.. 
-*- mode: rst -*- + +=========================== +Introduction and Quickstart +=========================== + + +Welcome to Pygments! This document explains the basic concepts and terms and +gives a few examples of how to use the library. + + +Architecture +============ + +There are four types of components that work together highlighting a piece of +code: + +* A **lexer** splits the source into tokens, fragments of the source that + have a token type that determines what the text represents semantically + (e.g., keyword, string, or comment). There is a lexer for every language + or markup format that Pygments supports. +* The token stream can be piped through **filters**, which usually modify + the token types or text fragments, e.g. uppercasing all keywords. +* A **formatter** then takes the token stream and writes it to an output + file, in a format such as HTML, LaTeX or RTF. +* While writing the output, a **style** determines how to highlight all the + different token types. It maps them to attributes like "red and bold". + + +Example +======= + +Here is a small example for highlighting Python code: + +.. sourcecode:: python + + from pygments import highlight + from pygments.lexers import PythonLexer + from pygments.formatters import HtmlFormatter + + code = 'print "Hello World"' + print(highlight(code, PythonLexer(), HtmlFormatter())) + +which prints something like this: + +.. sourcecode:: html + +
+
print "Hello World"
+
+ +As you can see, Pygments uses CSS classes (by default, but you can change that) +instead of inline styles in order to avoid outputting redundant style information over +and over. A CSS stylesheet that contains all CSS classes possibly used in the output +can be produced by: + +.. sourcecode:: python + + print(HtmlFormatter().get_style_defs('.highlight')) + +The argument to :func:`get_style_defs` is used as an additional CSS selector: +the output may look like this: + +.. sourcecode:: css + + .highlight .k { color: #AA22FF; font-weight: bold } + .highlight .s { color: #BB4444 } + ... + + +Options +======= + +The :func:`highlight()` function supports a fourth argument called *outfile*, it +must be a file object if given. The formatted output will then be written to +this file instead of being returned as a string. + +Lexers and formatters both support options. They are given to them as keyword +arguments either to the class or to the lookup method: + +.. sourcecode:: python + + from pygments import highlight + from pygments.lexers import get_lexer_by_name + from pygments.formatters import HtmlFormatter + + lexer = get_lexer_by_name("python", stripall=True) + formatter = HtmlFormatter(linenos=True, cssclass="source") + result = highlight(code, lexer, formatter) + +This makes the lexer strip all leading and trailing whitespace from the input +(`stripall` option), lets the formatter output line numbers (`linenos` option), +and sets the wrapping ``
<div>``'s class to ``source`` (instead of
+``highlight``).
+
+Important options include:
+
+`encoding` : for lexers and formatters
+    Since Pygments uses Unicode strings internally, this determines which
+    encoding will be used to convert to or from byte strings.
+`style` : for formatters
+    The name of the style to use when writing the output.
+
+
+For an overview of builtin lexers and formatters and their options, visit the
+:doc:`lexer <lexers>` and :doc:`formatters <formatters>` lists.
+
+For documentation on filters, see :doc:`this page <filters>`.
+
+
+Lexer and formatter lookup
+==========================
+
+If you want to look up a built-in lexer by its alias or a filename, you can use
+one of the following methods:
+
+.. sourcecode:: pycon
+
+    >>> from pygments.lexers import (get_lexer_by_name,
+    ...     get_lexer_for_filename, get_lexer_for_mimetype)
+
+    >>> get_lexer_by_name('python')
+    <pygments.lexers.PythonLexer>
+
+    >>> get_lexer_for_filename('spam.rb')
+    <pygments.lexers.RubyLexer>
+
+    >>> get_lexer_for_mimetype('text/x-perl')
+    <pygments.lexers.PerlLexer>
+
+All these functions accept keyword arguments; they will be passed to the lexer
+as options.
+
+A similar API is available for formatters: use :func:`.get_formatter_by_name()`
+and :func:`.get_formatter_for_filename()` from the :mod:`pygments.formatters`
+module for this purpose.
+
+
+Guessing lexers
+===============
+
+If you don't know the content of the file, or you want to highlight a file
+whose extension is ambiguous, such as ``.html`` (which could contain plain HTML
+or some template tags), use these functions:
+
+.. sourcecode:: pycon
+
+    >>> from pygments.lexers import guess_lexer, guess_lexer_for_filename
+
+    >>> guess_lexer('#!/usr/bin/python\nprint "Hello World!"')
+    <pygments.lexers.PythonLexer>
+
+    >>> guess_lexer_for_filename('test.py', 'print "Hello World!"')
+    <pygments.lexers.PythonLexer>
+
+:func:`.guess_lexer()` passes the given content to the lexer classes'
+:meth:`analyse_text()` method and returns the one for which it returns the
+highest number.
+
+All lexers have two different filename pattern lists: the primary and the
+secondary one. The :func:`.get_lexer_for_filename()` function only uses the
+primary list, whose entries are supposed to be unique among all lexers.
+:func:`.guess_lexer_for_filename()`, however, will first loop through all lexers
+and look at the primary and secondary filename patterns if the filename matches.
+If only one lexer matches, it is returned, else the guessing mechanism of
+:func:`.guess_lexer()` is used with the matching lexers.
+
+As usual, keyword arguments to these functions are given to the created lexer
+as options.
+
+
+Command line usage
+==================
+
+You can use Pygments from the command line, using the :program:`pygmentize`
+script::
+
+    $ pygmentize test.py
+
+will highlight the Python file test.py using ANSI escape sequences
+(a.k.a. terminal colors) and print the result to standard output.
+
+To output HTML, use the ``-f`` option::
+
+    $ pygmentize -f html -o test.html test.py
+
+to write an HTML-highlighted version of test.py to the file test.html.
+Note that it will only be a snippet of HTML; if you want a full HTML document,
+use the "full" option::
+
+    $ pygmentize -f html -O full -o test.html test.py
+
+This will produce a full HTML document with included stylesheet.
+
+A style can be selected with ``-O style=<name>``.
+
+If you need a stylesheet for an existing HTML file using Pygments CSS classes,
+it can be created with::
+
+    $ pygmentize -S default -f html > style.css
+
+where ``default`` is the style name.
+
+More options and tricks can be found in the :doc:`command line reference
+<cmdline>`.
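+
+Putting the pieces together, a minimal end-to-end run might look like this
+(file names are illustrative):
+
+.. sourcecode:: python
+
+    from pygments import highlight
+    from pygments.lexers import get_lexer_for_filename
+    from pygments.formatters import HtmlFormatter
+
+    formatter = HtmlFormatter(cssclass='source')
+    lexer = get_lexer_for_filename('test.py')
+
+    with open('test.py') as f:
+        html = highlight(f.read(), lexer, formatter)
+
+    with open('test.html', 'w') as f:
+        f.write(html)                                 # HTML snippet
+
+    with open('style.css', 'w') as f:
+        f.write(formatter.get_style_defs('.source'))  # matching stylesheet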
diff --git a/doc/docs/rstdirective.rst b/doc/docs/rstdirective.rst old mode 100644 new mode 100755 index edc117d..35142f1 --- a/doc/docs/rstdirective.rst +++ b/doc/docs/rstdirective.rst @@ -1,22 +1,22 @@ -.. -*- mode: rst -*- - -================================ -Using Pygments in ReST documents -================================ - -Many Python people use `ReST`_ for documentation their sourcecode, programs, -scripts et cetera. This also means that documentation often includes sourcecode -samples or snippets. - -You can easily enable Pygments support for your ReST texts using a custom -directive -- this is also how this documentation displays source code. - -From Pygments 0.9, the directive is shipped in the distribution as -`external/rst-directive.py`. You can copy and adapt this code to your liking. - -.. removed -- too confusing - *Loosely related note:* The ReST lexer now recognizes ``.. sourcecode::`` and - ``.. code::`` directives and highlights the contents in the specified language - if the `handlecodeblocks` option is true. - -.. _ReST: https://docutils.sourceforge.io/rst.html +.. -*- mode: rst -*- + +================================ +Using Pygments in ReST documents +================================ + +Many Python people use `ReST`_ for documentation their sourcecode, programs, +scripts et cetera. This also means that documentation often includes sourcecode +samples or snippets. + +You can easily enable Pygments support for your ReST texts using a custom +directive -- this is also how this documentation displays source code. + +From Pygments 0.9, the directive is shipped in the distribution as +`external/rst-directive.py`. You can copy and adapt this code to your liking. + +.. removed -- too confusing + *Loosely related note:* The ReST lexer now recognizes ``.. sourcecode::`` and + ``.. code::`` directives and highlights the contents in the specified language + if the `handlecodeblocks` option is true. + +.. _ReST: https://docutils.sourceforge.io/rst.html diff --git a/doc/docs/styles.rst b/doc/docs/styles.rst old mode 100644 new mode 100755 index 570293a..3d48c3b --- a/doc/docs/styles.rst +++ b/doc/docs/styles.rst @@ -1,232 +1,232 @@ -.. -*- mode: rst -*- - -====== -Styles -====== - -Pygments comes with some builtin styles that work for both the HTML and -LaTeX formatter. - -The builtin styles can be looked up with the `get_style_by_name` function: - -.. sourcecode:: pycon - - >>> from pygments.styles import get_style_by_name - >>> get_style_by_name('colorful') - - -You can pass a instance of a `Style` class to a formatter as the `style` -option in form of a string: - -.. sourcecode:: pycon - - >>> from pygments.styles import get_style_by_name - >>> from pygments.formatters import HtmlFormatter - >>> HtmlFormatter(style='colorful').style - - -Or you can also import your own style (which must be a subclass of -`pygments.style.Style`) and pass it to the formatter: - -.. sourcecode:: pycon - - >>> from yourapp.yourmodule import YourStyle - >>> from pygments.formatters import HtmlFormatter - >>> HtmlFormatter(style=YourStyle).style - - - -Creating Own Styles -=================== - -So, how to create a style? All you have to do is to subclass `Style` and -define some styles: - -.. 
sourcecode:: python - - from pygments.style import Style - from pygments.token import Keyword, Name, Comment, String, Error, \ - Number, Operator, Generic - - class YourStyle(Style): - default_style = "" - styles = { - Comment: 'italic #888', - Keyword: 'bold #005', - Name: '#f00', - Name.Function: '#0f0', - Name.Class: 'bold #0f0', - String: 'bg:#eee #111' - } - -That's it. There are just a few rules. When you define a style for `Name` -the style automatically also affects `Name.Function` and so on. If you -defined ``'bold'`` and you don't want boldface for a subtoken use ``'nobold'``. - -(Philosophy: the styles aren't written in CSS syntax since this way -they can be used for a variety of formatters.) - -`default_style` is the style inherited by all token types. - -To make the style usable for Pygments, you must - -* either register it as a plugin (see :doc:`the plugin docs `) -* or drop it into the `styles` subpackage of your Pygments distribution one style - class per style, where the file name is the style name and the class name is - `StylenameClass`. For example, if your style should be called - ``"mondrian"``, name the class `MondrianStyle`, put it into the file - ``mondrian.py`` and this file into the ``pygments.styles`` subpackage - directory. - - -Style Rules -=========== - -Here a small overview of all allowed styles: - -``bold`` - render text as bold -``nobold`` - don't render text as bold (to prevent subtokens being highlighted bold) -``italic`` - render text italic -``noitalic`` - don't render text as italic -``underline`` - render text underlined -``nounderline`` - don't render text underlined -``bg:`` - transparent background -``bg:#000000`` - background color (black) -``border:`` - no border -``border:#ffffff`` - border color (white) -``#ff0000`` - text color (red) -``noinherit`` - don't inherit styles from supertoken - -Note that there may not be a space between ``bg:`` and the color value -since the style definition string is split at whitespace. -Also, using named colors is not allowed since the supported color names -vary for different formatters. - -Furthermore, not all lexers might support every style. - - -Builtin Styles -============== - -Pygments ships some builtin styles which are maintained by the Pygments team. - -To get a list of known styles you can use this snippet: - -.. sourcecode:: pycon - - >>> from pygments.styles import STYLE_MAP - >>> STYLE_MAP.keys() - ['default', 'emacs', 'friendly', 'colorful'] - - -Getting a list of available styles -================================== - -.. versionadded:: 0.6 - -Because it could be that a plugin registered a style, there is -a way to iterate over all styles: - -.. sourcecode:: pycon - - >>> from pygments.styles import get_all_styles - >>> styles = list(get_all_styles()) - - -.. _AnsiTerminalStyle: - -Terminal Styles -=============== - -.. versionadded:: 2.2 - -Custom styles used with the 256-color terminal formatter can also map colors to -use the 8 default ANSI colors. To do so, use ``ansigreen``, ``ansibrightred`` or -any other colors defined in :attr:`pygments.style.ansicolors`. Foreground ANSI -colors will be mapped to the corresponding `escape codes 30 to 37 -`_ thus respecting any -custom color mapping and themes provided by many terminal emulators. Light -variants are treated as foreground color with and an added bold flag. -``bg:ansi`` will also be respected, except the light variant will be the -same shade as their dark variant. 
- -See the following example where the color of the string ``"hello world"`` is -governed by the escape sequence ``\x1b[34;01m`` (Ansi bright blue, Bold, 41 being red -background) instead of an extended foreground & background color. - -.. sourcecode:: pycon - - >>> from pygments import highlight - >>> from pygments.style import Style - >>> from pygments.token import Token - >>> from pygments.lexers import Python3Lexer - >>> from pygments.formatters import Terminal256Formatter - - >>> class MyStyle(Style): - styles = { - Token.String: 'ansibrightblue bg:ansibrightred', - } - - >>> code = 'print("Hello World")' - >>> result = highlight(code, Python3Lexer(), Terminal256Formatter(style=MyStyle)) - >>> print(result.encode()) - b'\x1b[34;41;01m"\x1b[39;49;00m\x1b[34;41;01mHello World\x1b[39;49;00m\x1b[34;41;01m"\x1b[39;49;00m' - -Colors specified using ``ansi*`` are converted to a default set of RGB colors -when used with formatters other than the terminal-256 formatter. - -By definition of ANSI, the following colors are considered "light" colors, and -will be rendered by most terminals as bold: - -- "brightblack" (darkgrey), "brightred", "brightgreen", "brightyellow", "brightblue", - "brightmagenta", "brightcyan", "white" - -The following are considered "dark" colors and will be rendered as non-bold: - -- "black", "red", "green", "yellow", "blue", "magenta", "cyan", - "gray" - -Exact behavior might depends on the terminal emulator you are using, and its -settings. - -.. _new-ansi-color-names: - -.. versionchanged:: 2.4 - -The definition of the ANSI color names has changed. -New names are easier to understand and align to the colors used in other projects. - -===================== ==================== -New names Pygments up to 2.3 -===================== ==================== -``ansiblack`` ``#ansiblack`` -``ansired`` ``#ansidarkred`` -``ansigreen`` ``#ansidarkgreen`` -``ansiyellow`` ``#ansibrown`` -``ansiblue`` ``#ansidarkblue`` -``ansimagenta`` ``#ansipurple`` -``ansicyan`` ``#ansiteal`` -``ansigray`` ``#ansilightgray`` -``ansibrightblack`` ``#ansidarkgray`` -``ansibrightred`` ``#ansired`` -``ansibrightgreen`` ``#ansigreen`` -``ansibrightyellow`` ``#ansiyellow`` -``ansibrightblue`` ``#ansiblue`` -``ansibrightmagenta`` ``#ansifuchsia`` -``ansibrightcyan`` ``#ansiturquoise`` -``ansiwhite`` ``#ansiwhite`` -===================== ==================== - -Old ANSI color names are deprecated but will still work. +.. -*- mode: rst -*- + +====== +Styles +====== + +Pygments comes with some builtin styles that work for both the HTML and +LaTeX formatter. + +The builtin styles can be looked up with the `get_style_by_name` function: + +.. sourcecode:: pycon + + >>> from pygments.styles import get_style_by_name + >>> get_style_by_name('colorful') + + +You can pass a instance of a `Style` class to a formatter as the `style` +option in form of a string: + +.. sourcecode:: pycon + + >>> from pygments.styles import get_style_by_name + >>> from pygments.formatters import HtmlFormatter + >>> HtmlFormatter(style='colorful').style + + +Or you can also import your own style (which must be a subclass of +`pygments.style.Style`) and pass it to the formatter: + +.. sourcecode:: pycon + + >>> from yourapp.yourmodule import YourStyle + >>> from pygments.formatters import HtmlFormatter + >>> HtmlFormatter(style=YourStyle).style + + + +Creating Own Styles +=================== + +So, how to create a style? All you have to do is to subclass `Style` and +define some styles: + +.. 
sourcecode:: python + + from pygments.style import Style + from pygments.token import Keyword, Name, Comment, String, Error, \ + Number, Operator, Generic + + class YourStyle(Style): + default_style = "" + styles = { + Comment: 'italic #888', + Keyword: 'bold #005', + Name: '#f00', + Name.Function: '#0f0', + Name.Class: 'bold #0f0', + String: 'bg:#eee #111' + } + +That's it. There are just a few rules. When you define a style for `Name` +the style automatically also affects `Name.Function` and so on. If you +defined ``'bold'`` and you don't want boldface for a subtoken use ``'nobold'``. + +(Philosophy: the styles aren't written in CSS syntax since this way +they can be used for a variety of formatters.) + +`default_style` is the style inherited by all token types. + +To make the style usable for Pygments, you must + +* either register it as a plugin (see :doc:`the plugin docs `) +* or drop it into the `styles` subpackage of your Pygments distribution one style + class per style, where the file name is the style name and the class name is + `StylenameClass`. For example, if your style should be called + ``"mondrian"``, name the class `MondrianStyle`, put it into the file + ``mondrian.py`` and this file into the ``pygments.styles`` subpackage + directory. + + +Style Rules +=========== + +Here a small overview of all allowed styles: + +``bold`` + render text as bold +``nobold`` + don't render text as bold (to prevent subtokens being highlighted bold) +``italic`` + render text italic +``noitalic`` + don't render text as italic +``underline`` + render text underlined +``nounderline`` + don't render text underlined +``bg:`` + transparent background +``bg:#000000`` + background color (black) +``border:`` + no border +``border:#ffffff`` + border color (white) +``#ff0000`` + text color (red) +``noinherit`` + don't inherit styles from supertoken + +Note that there may not be a space between ``bg:`` and the color value +since the style definition string is split at whitespace. +Also, using named colors is not allowed since the supported color names +vary for different formatters. + +Furthermore, not all lexers might support every style. + + +Builtin Styles +============== + +Pygments ships some builtin styles which are maintained by the Pygments team. + +To get a list of known styles you can use this snippet: + +.. sourcecode:: pycon + + >>> from pygments.styles import STYLE_MAP + >>> STYLE_MAP.keys() + ['default', 'emacs', 'friendly', 'colorful'] + + +Getting a list of available styles +================================== + +.. versionadded:: 0.6 + +Because it could be that a plugin registered a style, there is +a way to iterate over all styles: + +.. sourcecode:: pycon + + >>> from pygments.styles import get_all_styles + >>> styles = list(get_all_styles()) + + +.. _AnsiTerminalStyle: + +Terminal Styles +=============== + +.. versionadded:: 2.2 + +Custom styles used with the 256-color terminal formatter can also map colors to +use the 8 default ANSI colors. To do so, use ``ansigreen``, ``ansibrightred`` or +any other colors defined in :attr:`pygments.style.ansicolors`. Foreground ANSI +colors will be mapped to the corresponding `escape codes 30 to 37 +`_ thus respecting any +custom color mapping and themes provided by many terminal emulators. Light +variants are treated as foreground color with and an added bold flag. +``bg:ansi`` will also be respected, except the light variant will be the +same shade as their dark variant. 
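+
+To see exactly which names are accepted, you can inspect that attribute at
+runtime. A minimal sketch (the exact contents of the set may vary between
+Pygments versions; output abbreviated here):
+
+.. sourcecode:: pycon
+
+    >>> from pygments.style import ansicolors
+    >>> sorted(ansicolors)
+    ['ansiblack', 'ansiblue', 'ansibrightblack', ...]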
+ +See the following example where the color of the string ``"hello world"`` is +governed by the escape sequence ``\x1b[34;01m`` (Ansi bright blue, Bold, 41 being red +background) instead of an extended foreground & background color. + +.. sourcecode:: pycon + + >>> from pygments import highlight + >>> from pygments.style import Style + >>> from pygments.token import Token + >>> from pygments.lexers import Python3Lexer + >>> from pygments.formatters import Terminal256Formatter + + >>> class MyStyle(Style): + styles = { + Token.String: 'ansibrightblue bg:ansibrightred', + } + + >>> code = 'print("Hello World")' + >>> result = highlight(code, Python3Lexer(), Terminal256Formatter(style=MyStyle)) + >>> print(result.encode()) + b'\x1b[34;41;01m"\x1b[39;49;00m\x1b[34;41;01mHello World\x1b[39;49;00m\x1b[34;41;01m"\x1b[39;49;00m' + +Colors specified using ``ansi*`` are converted to a default set of RGB colors +when used with formatters other than the terminal-256 formatter. + +By definition of ANSI, the following colors are considered "light" colors, and +will be rendered by most terminals as bold: + +- "brightblack" (darkgrey), "brightred", "brightgreen", "brightyellow", "brightblue", + "brightmagenta", "brightcyan", "white" + +The following are considered "dark" colors and will be rendered as non-bold: + +- "black", "red", "green", "yellow", "blue", "magenta", "cyan", + "gray" + +Exact behavior might depends on the terminal emulator you are using, and its +settings. + +.. _new-ansi-color-names: + +.. versionchanged:: 2.4 + +The definition of the ANSI color names has changed. +New names are easier to understand and align to the colors used in other projects. + +===================== ==================== +New names Pygments up to 2.3 +===================== ==================== +``ansiblack`` ``#ansiblack`` +``ansired`` ``#ansidarkred`` +``ansigreen`` ``#ansidarkgreen`` +``ansiyellow`` ``#ansibrown`` +``ansiblue`` ``#ansidarkblue`` +``ansimagenta`` ``#ansipurple`` +``ansicyan`` ``#ansiteal`` +``ansigray`` ``#ansilightgray`` +``ansibrightblack`` ``#ansidarkgray`` +``ansibrightred`` ``#ansired`` +``ansibrightgreen`` ``#ansigreen`` +``ansibrightyellow`` ``#ansiyellow`` +``ansibrightblue`` ``#ansiblue`` +``ansibrightmagenta`` ``#ansifuchsia`` +``ansibrightcyan`` ``#ansiturquoise`` +``ansiwhite`` ``#ansiwhite`` +===================== ==================== + +Old ANSI color names are deprecated but will still work. diff --git a/doc/docs/tokens.rst b/doc/docs/tokens.rst old mode 100644 new mode 100755 index 801fc63..4de11cc --- a/doc/docs/tokens.rst +++ b/doc/docs/tokens.rst @@ -1,372 +1,372 @@ -.. -*- mode: rst -*- - -============== -Builtin Tokens -============== - -.. module:: pygments.token - -In the :mod:`pygments.token` module, there is a special object called `Token` -that is used to create token types. - -You can create a new token type by accessing an attribute of `Token`: - -.. sourcecode:: pycon - - >>> from pygments.token import Token - >>> Token.String - Token.String - >>> Token.String is Token.String - True - -Note that tokens are singletons so you can use the ``is`` operator for comparing -token types. - -As of Pygments 0.7 you can also use the ``in`` operator to perform set tests: - -.. sourcecode:: pycon - - >>> from pygments.token import Comment - >>> Comment.Single in Comment - True - >>> Comment in Comment.Multi - False - -This can be useful in :doc:`filters ` and if you write lexers on your -own without using the base lexers. 
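-
-For instance, a small filter can act on every comment token at once with such
-a set test, whatever the token's exact subtype. A minimal sketch using the
-`simplefilter` decorator (the filter name here is made up for illustration):
-
-.. sourcecode:: python
-
-    from pygments.filter import simplefilter
-    from pygments.token import Comment
-
-    @simplefilter
-    def strip_comments(self, lexer, stream, options):
-        # keep every token that is *not* a subtype of Comment
-        for ttype, value in stream:
-            if ttype not in Comment:
-                yield ttype, value
-
-Such a filter is then attached with ``lexer.add_filter(strip_comments())``.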
- -You can also split a token type into a hierarchy, and get the parent of it: - -.. sourcecode:: pycon - - >>> String.split() - [Token, Token.Literal, Token.Literal.String] - >>> String.parent - Token.Literal - -In principle, you can create an unlimited number of token types but nobody can -guarantee that a style would define style rules for a token type. Because of -that, Pygments proposes some global token types defined in the -`pygments.token.STANDARD_TYPES` dict. - -For some tokens aliases are already defined: - -.. sourcecode:: pycon - - >>> from pygments.token import String - >>> String - Token.Literal.String - -Inside the :mod:`pygments.token` module the following aliases are defined: - -============= ============================ ==================================== -`Text` `Token.Text` for any type of text data -`Whitespace` `Token.Text.Whitespace` for specially highlighted whitespace -`Error` `Token.Error` represents lexer errors -`Other` `Token.Other` special token for data not - matched by a parser (e.g. HTML - markup in PHP code) -`Keyword` `Token.Keyword` any kind of keywords -`Name` `Token.Name` variable/function names -`Literal` `Token.Literal` Any literals -`String` `Token.Literal.String` string literals -`Number` `Token.Literal.Number` number literals -`Operator` `Token.Operator` operators (``+``, ``not``...) -`Punctuation` `Token.Punctuation` punctuation (``[``, ``(``...) -`Comment` `Token.Comment` any kind of comments -`Generic` `Token.Generic` generic tokens (have a look at - the explanation below) -============= ============================ ==================================== - -The `Whitespace` token type is new in Pygments 0.8. It is used only by the -`VisibleWhitespaceFilter` currently. - -Normally you just create token types using the already defined aliases. For each -of those token aliases, a number of subtypes exists (excluding the special tokens -`Token.Text`, `Token.Error` and `Token.Other`) - -The `is_token_subtype()` function in the `pygments.token` module can be used to -test if a token type is a subtype of another (such as `Name.Tag` and `Name`). -(This is the same as ``Name.Tag in Name``. The overloaded `in` operator was newly -introduced in Pygments 0.7, the function still exists for backwards -compatibility.) - -With Pygments 0.7, it's also possible to convert strings to token types (for example -if you want to supply a token from the command line): - -.. sourcecode:: pycon - - >>> from pygments.token import String, string_to_tokentype - >>> string_to_tokentype("String") - Token.Literal.String - >>> string_to_tokentype("Token.Literal.String") - Token.Literal.String - >>> string_to_tokentype(String) - Token.Literal.String - - -Keyword Tokens -============== - -`Keyword` - For any kind of keyword (especially if it doesn't match any of the - subtypes of course). - -`Keyword.Constant` - For keywords that are constants (e.g. ``None`` in future Python versions). - -`Keyword.Declaration` - For keywords used for variable declaration (e.g. ``var`` in some programming - languages like JavaScript). - -`Keyword.Namespace` - For keywords used for namespace declarations (e.g. ``import`` in Python and - Java and ``package`` in Java). - -`Keyword.Pseudo` - For keywords that aren't really keywords (e.g. ``None`` in old Python - versions). - -`Keyword.Reserved` - For reserved keywords. - -`Keyword.Type` - For builtin types that can't be used as identifiers (e.g. ``int``, - ``char`` etc. in C). 
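-
-In a lexer, these subtypes are assigned like any other token type. A
-hypothetical fragment for a toy C-like language (the lexer and its regexes
-are made up for illustration):
-
-.. sourcecode:: python
-
-    from pygments.lexer import RegexLexer
-    from pygments.token import Keyword, Name, Text
-
-    class ToyLexer(RegexLexer):
-        name = 'Toy'
-        tokens = {
-            'root': [
-                (r'\s+', Text),
-                (r'\b(?:int|char|float)\b', Keyword.Type),
-                (r'\bimport\b', Keyword.Namespace),
-                (r'\w+', Name),
-                (r'.', Text),
-            ],
-        }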
- - -Name Tokens -=========== - -`Name` - For any name (variable names, function names, classes). - -`Name.Attribute` - For all attributes (e.g. in HTML tags). - -`Name.Builtin` - Builtin names; names that are available in the global namespace. - -`Name.Builtin.Pseudo` - Builtin names that are implicit (e.g. ``self`` in Ruby, ``this`` in Java). - -`Name.Class` - Class names. Because no lexer can know if a name is a class or a function - or something else this token is meant for class declarations. - -`Name.Constant` - Token type for constants. In some languages you can recognise a token by the - way it's defined (the value after a ``const`` keyword for example). In - other languages constants are uppercase by definition (Ruby). - -`Name.Decorator` - Token type for decorators. Decorators are syntactic elements in the Python - language. Similar syntax elements exist in C# and Java. - -`Name.Entity` - Token type for special entities. (e.g. `` `` in HTML). - -`Name.Exception` - Token type for exception names (e.g. ``RuntimeError`` in Python). Some languages - define exceptions in the function signature (Java). You can highlight - the name of that exception using this token then. - -`Name.Function` - Token type for function names. - -`Name.Function.Magic` - same as `Name.Function` but for special function names that have an implicit use - in a language (e.g. ``__init__`` method in Python). - -`Name.Label` - Token type for label names (e.g. in languages that support ``goto``). - -`Name.Namespace` - Token type for namespaces. (e.g. import paths in Java/Python), names following - the ``module``/``namespace`` keyword in other languages. - -`Name.Other` - Other names. Normally unused. - -`Name.Tag` - Tag names (in HTML/XML markup or configuration files). - -`Name.Variable` - Token type for variables. Some languages have prefixes for variable names - (PHP, Ruby, Perl). You can highlight them using this token. - -`Name.Variable.Class` - same as `Name.Variable` but for class variables (also static variables). - -`Name.Variable.Global` - same as `Name.Variable` but for global variables (used in Ruby, for - example). - -`Name.Variable.Instance` - same as `Name.Variable` but for instance variables. - -`Name.Variable.Magic` - same as `Name.Variable` but for special variable names that have an implicit use - in a language (e.g. ``__doc__`` in Python). - - -Literals -======== - -`Literal` - For any literal (if not further defined). - -`Literal.Date` - for date literals (e.g. ``42d`` in Boo). - - -`String` - For any string literal. - -`String.Affix` - Token type for affixes that further specify the type of the string they're - attached to (e.g. the prefixes ``r`` and ``u8`` in ``r"foo"`` and ``u8"foo"``). - -`String.Backtick` - Token type for strings enclosed in backticks. - -`String.Char` - Token type for single characters (e.g. Java, C). - -`String.Delimiter` - Token type for delimiting identifiers in "heredoc", raw and other similar - strings (e.g. the word ``END`` in Perl code ``print <<'END';``). - -`String.Doc` - Token type for documentation strings (for example Python). - -`String.Double` - Double quoted strings. - -`String.Escape` - Token type for escape sequences in strings. - -`String.Heredoc` - Token type for "heredoc" strings (e.g. in Ruby or Perl). - -`String.Interpol` - Token type for interpolated parts in strings (e.g. ``#{foo}`` in Ruby). - -`String.Other` - Token type for any other strings (for example ``%q{foo}`` string constructs - in Ruby). 
- -`String.Regex` - Token type for regular expression literals (e.g. ``/foo/`` in JavaScript). - -`String.Single` - Token type for single quoted strings. - -`String.Symbol` - Token type for symbols (e.g. ``:foo`` in LISP or Ruby). - - -`Number` - Token type for any number literal. - -`Number.Bin` - Token type for binary literals (e.g. ``0b101010``). - -`Number.Float` - Token type for float literals (e.g. ``42.0``). - -`Number.Hex` - Token type for hexadecimal number literals (e.g. ``0xdeadbeef``). - -`Number.Integer` - Token type for integer literals (e.g. ``42``). - -`Number.Integer.Long` - Token type for long integer literals (e.g. ``42L`` in Python). - -`Number.Oct` - Token type for octal literals. - - -Operators -========= - -`Operator` - For any punctuation operator (e.g. ``+``, ``-``). - -`Operator.Word` - For any operator that is a word (e.g. ``not``). - - -Punctuation -=========== - -.. versionadded:: 0.7 - -`Punctuation` - For any punctuation which is not an operator (e.g. ``[``, ``(``...) - - -Comments -======== - -`Comment` - Token type for any comment. - -`Comment.Hashbang` - Token type for hashbang comments (i.e. first lines of files that start with - ``#!``). - -`Comment.Multiline` - Token type for multiline comments. - -`Comment.Preproc` - Token type for preprocessor comments (also ``>> from pygments.token import Token + >>> Token.String + Token.String + >>> Token.String is Token.String + True + +Note that tokens are singletons so you can use the ``is`` operator for comparing +token types. + +As of Pygments 0.7 you can also use the ``in`` operator to perform set tests: + +.. sourcecode:: pycon + + >>> from pygments.token import Comment + >>> Comment.Single in Comment + True + >>> Comment in Comment.Multi + False + +This can be useful in :doc:`filters ` and if you write lexers on your +own without using the base lexers. + +You can also split a token type into a hierarchy, and get the parent of it: + +.. sourcecode:: pycon + + >>> String.split() + [Token, Token.Literal, Token.Literal.String] + >>> String.parent + Token.Literal + +In principle, you can create an unlimited number of token types but nobody can +guarantee that a style would define style rules for a token type. Because of +that, Pygments proposes some global token types defined in the +`pygments.token.STANDARD_TYPES` dict. + +For some tokens aliases are already defined: + +.. sourcecode:: pycon + + >>> from pygments.token import String + >>> String + Token.Literal.String + +Inside the :mod:`pygments.token` module the following aliases are defined: + +============= ============================ ==================================== +`Text` `Token.Text` for any type of text data +`Whitespace` `Token.Text.Whitespace` for specially highlighted whitespace +`Error` `Token.Error` represents lexer errors +`Other` `Token.Other` special token for data not + matched by a parser (e.g. HTML + markup in PHP code) +`Keyword` `Token.Keyword` any kind of keywords +`Name` `Token.Name` variable/function names +`Literal` `Token.Literal` Any literals +`String` `Token.Literal.String` string literals +`Number` `Token.Literal.Number` number literals +`Operator` `Token.Operator` operators (``+``, ``not``...) +`Punctuation` `Token.Punctuation` punctuation (``[``, ``(``...) +`Comment` `Token.Comment` any kind of comments +`Generic` `Token.Generic` generic tokens (have a look at + the explanation below) +============= ============================ ==================================== + +The `Whitespace` token type is new in Pygments 0.8. 
It is used only by the +`VisibleWhitespaceFilter` currently. + +Normally you just create token types using the already defined aliases. For each +of those token aliases, a number of subtypes exists (excluding the special tokens +`Token.Text`, `Token.Error` and `Token.Other`) + +The `is_token_subtype()` function in the `pygments.token` module can be used to +test if a token type is a subtype of another (such as `Name.Tag` and `Name`). +(This is the same as ``Name.Tag in Name``. The overloaded `in` operator was newly +introduced in Pygments 0.7, the function still exists for backwards +compatibility.) + +With Pygments 0.7, it's also possible to convert strings to token types (for example +if you want to supply a token from the command line): + +.. sourcecode:: pycon + + >>> from pygments.token import String, string_to_tokentype + >>> string_to_tokentype("String") + Token.Literal.String + >>> string_to_tokentype("Token.Literal.String") + Token.Literal.String + >>> string_to_tokentype(String) + Token.Literal.String + + +Keyword Tokens +============== + +`Keyword` + For any kind of keyword (especially if it doesn't match any of the + subtypes of course). + +`Keyword.Constant` + For keywords that are constants (e.g. ``None`` in future Python versions). + +`Keyword.Declaration` + For keywords used for variable declaration (e.g. ``var`` in some programming + languages like JavaScript). + +`Keyword.Namespace` + For keywords used for namespace declarations (e.g. ``import`` in Python and + Java and ``package`` in Java). + +`Keyword.Pseudo` + For keywords that aren't really keywords (e.g. ``None`` in old Python + versions). + +`Keyword.Reserved` + For reserved keywords. + +`Keyword.Type` + For builtin types that can't be used as identifiers (e.g. ``int``, + ``char`` etc. in C). + + +Name Tokens +=========== + +`Name` + For any name (variable names, function names, classes). + +`Name.Attribute` + For all attributes (e.g. in HTML tags). + +`Name.Builtin` + Builtin names; names that are available in the global namespace. + +`Name.Builtin.Pseudo` + Builtin names that are implicit (e.g. ``self`` in Ruby, ``this`` in Java). + +`Name.Class` + Class names. Because no lexer can know if a name is a class or a function + or something else this token is meant for class declarations. + +`Name.Constant` + Token type for constants. In some languages you can recognise a token by the + way it's defined (the value after a ``const`` keyword for example). In + other languages constants are uppercase by definition (Ruby). + +`Name.Decorator` + Token type for decorators. Decorators are syntactic elements in the Python + language. Similar syntax elements exist in C# and Java. + +`Name.Entity` + Token type for special entities. (e.g. `` `` in HTML). + +`Name.Exception` + Token type for exception names (e.g. ``RuntimeError`` in Python). Some languages + define exceptions in the function signature (Java). You can highlight + the name of that exception using this token then. + +`Name.Function` + Token type for function names. + +`Name.Function.Magic` + same as `Name.Function` but for special function names that have an implicit use + in a language (e.g. ``__init__`` method in Python). + +`Name.Label` + Token type for label names (e.g. in languages that support ``goto``). + +`Name.Namespace` + Token type for namespaces. (e.g. import paths in Java/Python), names following + the ``module``/``namespace`` keyword in other languages. + +`Name.Other` + Other names. Normally unused. 
+ +`Name.Tag` + Tag names (in HTML/XML markup or configuration files). + +`Name.Variable` + Token type for variables. Some languages have prefixes for variable names + (PHP, Ruby, Perl). You can highlight them using this token. + +`Name.Variable.Class` + same as `Name.Variable` but for class variables (also static variables). + +`Name.Variable.Global` + same as `Name.Variable` but for global variables (used in Ruby, for + example). + +`Name.Variable.Instance` + same as `Name.Variable` but for instance variables. + +`Name.Variable.Magic` + same as `Name.Variable` but for special variable names that have an implicit use + in a language (e.g. ``__doc__`` in Python). + + +Literals +======== + +`Literal` + For any literal (if not further defined). + +`Literal.Date` + for date literals (e.g. ``42d`` in Boo). + + +`String` + For any string literal. + +`String.Affix` + Token type for affixes that further specify the type of the string they're + attached to (e.g. the prefixes ``r`` and ``u8`` in ``r"foo"`` and ``u8"foo"``). + +`String.Backtick` + Token type for strings enclosed in backticks. + +`String.Char` + Token type for single characters (e.g. Java, C). + +`String.Delimiter` + Token type for delimiting identifiers in "heredoc", raw and other similar + strings (e.g. the word ``END`` in Perl code ``print <<'END';``). + +`String.Doc` + Token type for documentation strings (for example Python). + +`String.Double` + Double quoted strings. + +`String.Escape` + Token type for escape sequences in strings. + +`String.Heredoc` + Token type for "heredoc" strings (e.g. in Ruby or Perl). + +`String.Interpol` + Token type for interpolated parts in strings (e.g. ``#{foo}`` in Ruby). + +`String.Other` + Token type for any other strings (for example ``%q{foo}`` string constructs + in Ruby). + +`String.Regex` + Token type for regular expression literals (e.g. ``/foo/`` in JavaScript). + +`String.Single` + Token type for single quoted strings. + +`String.Symbol` + Token type for symbols (e.g. ``:foo`` in LISP or Ruby). + + +`Number` + Token type for any number literal. + +`Number.Bin` + Token type for binary literals (e.g. ``0b101010``). + +`Number.Float` + Token type for float literals (e.g. ``42.0``). + +`Number.Hex` + Token type for hexadecimal number literals (e.g. ``0xdeadbeef``). + +`Number.Integer` + Token type for integer literals (e.g. ``42``). + +`Number.Integer.Long` + Token type for long integer literals (e.g. ``42L`` in Python). + +`Number.Oct` + Token type for octal literals. + + +Operators +========= + +`Operator` + For any punctuation operator (e.g. ``+``, ``-``). + +`Operator.Word` + For any operator that is a word (e.g. ``not``). + + +Punctuation +=========== + +.. versionadded:: 0.7 + +`Punctuation` + For any punctuation which is not an operator (e.g. ``[``, ``(``...) + + +Comments +======== + +`Comment` + Token type for any comment. + +`Comment.Hashbang` + Token type for hashbang comments (i.e. first lines of files that start with + ``#!``). + +`Comment.Multiline` + Token type for multiline comments. + +`Comment.Preproc` + Token type for preprocessor comments (also ```. - -.. versionadded:: 0.7 - The formatters now also accept an `outencoding` option which will override - the `encoding` option if given. This makes it possible to use a single - options dict with lexers and formatters, and still have different input and - output encodings. - -.. 
_chardet: https://chardet.github.io/ +===================== +Unicode and Encodings +===================== + +Since Pygments 0.6, all lexers use unicode strings internally. Because of that +you might encounter the occasional :exc:`UnicodeDecodeError` if you pass strings +with the wrong encoding. + +Per default all lexers have their input encoding set to `guess`. This means +that the following encodings are tried: + +* UTF-8 (including BOM handling) +* The locale encoding (i.e. the result of `locale.getpreferredencoding()`) +* As a last resort, `latin1` + +If you pass a lexer a byte string object (not unicode), it tries to decode the +data using this encoding. + +You can override the encoding using the `encoding` or `inencoding` lexer +options. If you have the `chardet`_ library installed and set the encoding to +``chardet`` if will analyse the text and use the encoding it thinks is the +right one automatically: + +.. sourcecode:: python + + from pygments.lexers import PythonLexer + lexer = PythonLexer(encoding='chardet') + +The best way is to pass Pygments unicode objects. In that case you can't get +unexpected output. + +The formatters now send Unicode objects to the stream if you don't set the +output encoding. You can do so by passing the formatters an `encoding` option: + +.. sourcecode:: python + + from pygments.formatters import HtmlFormatter + f = HtmlFormatter(encoding='utf-8') + +**You will have to set this option if you have non-ASCII characters in the +source and the output stream does not accept Unicode written to it!** +This is the case for all regular files and for terminals. + +Note: The Terminal formatter tries to be smart: if its output stream has an +`encoding` attribute, and you haven't set the option, it will encode any +Unicode string with this encoding before writing it. This is the case for +`sys.stdout`, for example. The other formatters don't have that behavior. + +Another note: If you call Pygments via the command line (`pygmentize`), +encoding is handled differently, see :doc:`the command line docs `. + +.. versionadded:: 0.7 + The formatters now also accept an `outencoding` option which will override + the `encoding` option if given. This makes it possible to use a single + options dict with lexers and formatters, and still have different input and + output encodings. + +.. _chardet: https://chardet.github.io/ diff --git a/doc/download.rst b/doc/download.rst old mode 100644 new mode 100755 index 7ac0868..e3b3681 --- a/doc/download.rst +++ b/doc/download.rst @@ -1,39 +1,39 @@ -Download and installation -========================= - -The current release is version |version|. - -Packaged versions ------------------ - -You can download it `from the Python Package Index -`_. For installation of packages from -PyPI, we recommend `Pip `_, which works on all -major platforms. - -Under Linux, most distributions include a package for Pygments, usually called -``pygments`` or ``python-pygments``. You can install it with the package -manager as usual. - -Development sources -------------------- - -We're using the Git version control system. You can get the development source -using this command:: - - git clone https://github.com/pygments/pygments - -Development takes place at `GitHub `_. - -The latest changes in the development source code are listed in the `changelog -`_. - -.. Documentation - ------------- - -.. XXX todo - - You can download the documentation either as - a bunch of rst files from the Git repository, see above, or - as a tar.gz containing rendered HTML files:

-   pygmentsdocs.tar.gz

+Download and installation +========================= + +The current release is version |version|. + +Packaged versions +----------------- + +You can download it `from the Python Package Index +`_. For installation of packages from +PyPI, we recommend `Pip `_, which works on all +major platforms. + +Under Linux, most distributions include a package for Pygments, usually called +``pygments`` or ``python-pygments``. You can install it with the package +manager as usual. + +Development sources +------------------- + +We're using the Git version control system. You can get the development source +using this command:: + + git clone https://github.com/pygments/pygments + +Development takes place at `GitHub `_. + +The latest changes in the development source code are listed in the `changelog +`_. + +.. Documentation + ------------- + +.. XXX todo + + You can download the documentation either as + a bunch of rst files from the Git repository, see above, or + as a tar.gz containing rendered HTML files:

+   pygmentsdocs.tar.gz

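+
+For reference, installing the packaged release with Pip is a one-liner (shown
+for a typical setup; adjust as needed for virtual environments or your
+distribution's package manager)::
+
+    pip install Pygments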
diff --git a/doc/faq.rst b/doc/faq.rst old mode 100644 new mode 100755 index 69ad393..8e339c2 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -1,141 +1,141 @@ -:orphan: - -Pygments FAQ -============= - -What is Pygments? ------------------ - -Pygments is a syntax highlighting engine written in Python. That means, it will -take source code (or other markup) in a supported language and output a -processed version (in different formats) containing syntax highlighting markup. - -Its features include: - -* a wide range of common :doc:`languages and markup formats ` is supported -* new languages and formats are added easily -* a number of output formats is available, including: - - - HTML - - ANSI sequences (console output) - - LaTeX - - RTF - -* it is usable as a command-line tool and as a library -* parsing and formatting is fast - -Pygments is licensed under the BSD license. - -Where does the name Pygments come from? ---------------------------------------- - -*Py* of course stands for Python, while *pigments* are used for coloring paint, -and in this case, source code! - -What are the system requirements? ---------------------------------- - -Pygments only needs a standard Python install, version 2.7 or higher or version -3.5 or higher for Python 3. No additional libraries are needed. - -How can I use Pygments? ------------------------ - -Pygments is usable as a command-line tool as well as a library. - -From the command-line, usage looks like this (assuming the pygmentize script is -properly installed):: - - pygmentize -f html /path/to/file.py - -This will print a HTML-highlighted version of /path/to/file.py to standard output. - -For a complete help, please run ``pygmentize -h``. - -Usage as a library is thoroughly demonstrated in the Documentation section. - -How do I make a new style? --------------------------- - -Please see the :doc:`documentation on styles `. - -How can I report a bug or suggest a feature? --------------------------------------------- - -Please report bugs and feature wishes in the tracker at GitHub. - -You can also e-mail the authors, see the contact details. - -I want this support for this language! --------------------------------------- - -Instead of waiting for others to include language support, why not write it -yourself? All you have to know is :doc:`outlined in the docs -`. - -Can I use Pygments for programming language processing? -------------------------------------------------------- - -The Pygments lexing machinery is quite powerful can be used to build lexers for -basically all languages. However, parsing them is not possible, though some -lexers go some steps in this direction in order to e.g. highlight function names -differently. - -Also, error reporting is not the scope of Pygments. It focuses on correctly -highlighting syntactically valid documents, not finding and compensating errors. - -Who uses Pygments? ------------------- - -This is an (incomplete) list of projects and sites known to use the Pygments highlighter. 
- -* `Wikipedia `_ -* `BitBucket `_, a Mercurial and Git hosting site -* `The Sphinx documentation builder `_, for embedded source examples -* `rst2pdf `_, a reStructuredText to PDF converter -* `Codecov `_, a code coverage CI service -* `Trac `_, the universal project management tool -* `AsciiDoc `_, a text-based documentation generator -* `ActiveState Code `_, the Python Cookbook successor -* `ViewVC `_, a web-based version control repository browser -* `BzrFruit `_, a Bazaar branch viewer -* `QBzr `_, a cross-platform Qt-based GUI front end for Bazaar -* `Review Board `_, a collaborative code reviewing tool -* `Diamanda `_, a Django powered wiki system with support for Pygments -* `Progopedia `_ (`English `_), - an encyclopedia of programming languages -* `Bruce `_, a reStructuredText presentation tool -* `PIDA `_, a universal IDE written in Python -* `BPython `_, a curses-based intelligent Python shell -* `PuDB `_, a console Python debugger -* `XWiki `_, a wiki-based development framework in Java, using Jython -* `roux `_, a script for running R scripts - and creating beautiful output including graphs -* `hurl `_, a web service for making HTTP requests -* `wxHTMLPygmentizer `_ is - a GUI utility, used to make code-colorization easier -* `Postmarkup `_, a BBCode to XHTML generator -* `WpPygments `_, and `WPygments - `_, highlighter plugins for WordPress -* `Siafoo `_, a tool for sharing and storing useful code and programming experience -* `D source `_, a community for the D programming language -* `dpaste.com `_, another Django pastebin -* `Django snippets `_, a pastebin for Django code -* `Fayaa `_, a Chinese pastebin -* `Incollo.com `_, a free collaborative debugging tool -* `PasteBox `_, a pastebin focused on privacy -* `hilite.me `_, a site to highlight code snippets -* `patx.me `_, a pastebin -* `Fluidic `_, an experiment in - integrating shells with a GUI -* `pygments.rb `_, a pygments wrapper for Ruby -* `Clygments `_, a pygments wrapper for - Clojure -* `PHPygments `_, a pygments wrapper for PHP -* `Spyder `_, the Scientific Python Development - Environment, uses pygments for the multi-language syntax highlighting in its - `editor `_. -* `snippet.host `_, minimal text and code snippet hosting - -If you have a project or web site using Pygments, `open an issue or PR -`_ and we'll add a line here. +:orphan: + +Pygments FAQ +============= + +What is Pygments? +----------------- + +Pygments is a syntax highlighting engine written in Python. That means, it will +take source code (or other markup) in a supported language and output a +processed version (in different formats) containing syntax highlighting markup. + +Its features include: + +* a wide range of common :doc:`languages and markup formats ` is supported +* new languages and formats are added easily +* a number of output formats is available, including: + + - HTML + - ANSI sequences (console output) + - LaTeX + - RTF + +* it is usable as a command-line tool and as a library +* parsing and formatting is fast + +Pygments is licensed under the BSD license. + +Where does the name Pygments come from? +--------------------------------------- + +*Py* of course stands for Python, while *pigments* are used for coloring paint, +and in this case, source code! + +What are the system requirements? +--------------------------------- + +Pygments only needs a standard Python install, version 2.7 or higher or version +3.5 or higher for Python 3. No additional libraries are needed. + +How can I use Pygments? 
+----------------------- + +Pygments is usable as a command-line tool as well as a library. + +From the command-line, usage looks like this (assuming the pygmentize script is +properly installed):: + + pygmentize -f html /path/to/file.py + +This will print a HTML-highlighted version of /path/to/file.py to standard output. + +For a complete help, please run ``pygmentize -h``. + +Usage as a library is thoroughly demonstrated in the Documentation section. + +How do I make a new style? +-------------------------- + +Please see the :doc:`documentation on styles `. + +How can I report a bug or suggest a feature? +-------------------------------------------- + +Please report bugs and feature wishes in the tracker at GitHub. + +You can also e-mail the authors, see the contact details. + +I want this support for this language! +-------------------------------------- + +Instead of waiting for others to include language support, why not write it +yourself? All you have to know is :doc:`outlined in the docs +`. + +Can I use Pygments for programming language processing? +------------------------------------------------------- + +The Pygments lexing machinery is quite powerful can be used to build lexers for +basically all languages. However, parsing them is not possible, though some +lexers go some steps in this direction in order to e.g. highlight function names +differently. + +Also, error reporting is not the scope of Pygments. It focuses on correctly +highlighting syntactically valid documents, not finding and compensating errors. + +Who uses Pygments? +------------------ + +This is an (incomplete) list of projects and sites known to use the Pygments highlighter. + +* `Wikipedia `_ +* `BitBucket `_, a Mercurial and Git hosting site +* `The Sphinx documentation builder `_, for embedded source examples +* `rst2pdf `_, a reStructuredText to PDF converter +* `Codecov `_, a code coverage CI service +* `Trac `_, the universal project management tool +* `AsciiDoc `_, a text-based documentation generator +* `ActiveState Code `_, the Python Cookbook successor +* `ViewVC `_, a web-based version control repository browser +* `BzrFruit `_, a Bazaar branch viewer +* `QBzr `_, a cross-platform Qt-based GUI front end for Bazaar +* `Review Board `_, a collaborative code reviewing tool +* `Diamanda `_, a Django powered wiki system with support for Pygments +* `Progopedia `_ (`English `_), + an encyclopedia of programming languages +* `Bruce `_, a reStructuredText presentation tool +* `PIDA `_, a universal IDE written in Python +* `BPython `_, a curses-based intelligent Python shell +* `PuDB `_, a console Python debugger +* `XWiki `_, a wiki-based development framework in Java, using Jython +* `roux `_, a script for running R scripts + and creating beautiful output including graphs +* `hurl `_, a web service for making HTTP requests +* `wxHTMLPygmentizer `_ is + a GUI utility, used to make code-colorization easier +* `Postmarkup `_, a BBCode to XHTML generator +* `WpPygments `_, and `WPygments + `_, highlighter plugins for WordPress +* `Siafoo `_, a tool for sharing and storing useful code and programming experience +* `D source `_, a community for the D programming language +* `dpaste.com `_, another Django pastebin +* `Django snippets `_, a pastebin for Django code +* `Fayaa `_, a Chinese pastebin +* `Incollo.com `_, a free collaborative debugging tool +* `PasteBox `_, a pastebin focused on privacy +* `hilite.me `_, a site to highlight code snippets +* `patx.me `_, a pastebin +* `Fluidic `_, an experiment in + 
integrating shells with a GUI +* `pygments.rb `_, a pygments wrapper for Ruby +* `Clygments `_, a pygments wrapper for + Clojure +* `PHPygments `_, a pygments wrapper for PHP +* `Spyder `_, the Scientific Python Development + Environment, uses pygments for the multi-language syntax highlighting in its + `editor `_. +* `snippet.host `_, minimal text and code snippet hosting + +If you have a project or web site using Pygments, `open an issue or PR +`_ and we'll add a line here. diff --git a/doc/index.rst b/doc/index.rst old mode 100644 new mode 100755 index e538aad..2ff2cbb --- a/doc/index.rst +++ b/doc/index.rst @@ -1,49 +1,49 @@ -Welcome! -======== - -This is the home of Pygments. It is a generic syntax highlighter suitable for -use in code hosting, forums, wikis or other applications that need to prettify -source code. Highlights are: - -* a wide range of over 500 languages and other text formats is supported -* special attention is paid to details that increase highlighting quality -* support for new languages and formats are added easily; most languages use a - simple regex-based lexing mechanism -* a number of output formats is available, among them HTML, RTF, LaTeX and ANSI - sequences -* it is usable as a command-line tool and as a library -* ... and it highlights even Perl 6! - -Read more in the :doc:`FAQ list ` or the :doc:`documentation `, -or `download the latest release `_. - -.. _contribute: - -Contribute ----------- - -Like every open-source project, we are always looking for volunteers to help us -with programming. Python knowledge is required, but don't fear: Python is a very -clear and easy to learn language. - -Development takes place on `GitHub `_. - -If you found a bug, just open a ticket in the GitHub tracker. Be sure to log -in to be notified when the issue is fixed -- development is not fast-paced as -the library is quite stable. You can also send an e-mail to the developers, see -below. - -The authors ------------ - -Pygments is maintained by **Georg Brandl**, e-mail address *georg*\ *@*\ *python.org* -and **Matthäus Chajdas**. - -Many lexers and fixes have been contributed by **Armin Ronacher**, the rest of -the `Pocoo `_ team and **Tim Hatch**. - -.. toctree:: - :maxdepth: 1 - :hidden: - - docs/index +Welcome! +======== + +This is the home of Pygments. It is a generic syntax highlighter suitable for +use in code hosting, forums, wikis or other applications that need to prettify +source code. Highlights are: + +* a wide range of over 500 languages and other text formats is supported +* special attention is paid to details that increase highlighting quality +* support for new languages and formats are added easily; most languages use a + simple regex-based lexing mechanism +* a number of output formats is available, among them HTML, RTF, LaTeX and ANSI + sequences +* it is usable as a command-line tool and as a library +* ... and it highlights even Perl 6! + +Read more in the :doc:`FAQ list ` or the :doc:`documentation `, +or `download the latest release `_. + +.. _contribute: + +Contribute +---------- + +Like every open-source project, we are always looking for volunteers to help us +with programming. Python knowledge is required, but don't fear: Python is a very +clear and easy to learn language. + +Development takes place on `GitHub `_. + +If you found a bug, just open a ticket in the GitHub tracker. Be sure to log +in to be notified when the issue is fixed -- development is not fast-paced as +the library is quite stable. 
You can also send an e-mail to the developers, see +below. + +The authors +----------- + +Pygments is maintained by **Georg Brandl**, e-mail address *georg*\ *@*\ *python.org* +and **Matthäus Chajdas**. + +Many lexers and fixes have been contributed by **Armin Ronacher**, the rest of +the `Pocoo `_ team and **Tim Hatch**. + +.. toctree:: + :maxdepth: 1 + :hidden: + + docs/index diff --git a/doc/languages.rst b/doc/languages.rst old mode 100644 new mode 100755 index 9fc0093..b78b660 --- a/doc/languages.rst +++ b/doc/languages.rst @@ -1,297 +1,355 @@ -:orphan: - -Supported languages -=================== - -Pygments supports an ever-growing range of languages. Watch this space... - -Programming languages ---------------------- - -* ActionScript -* Ada -* Agda (incl. literate) -* `Alloy `_ -* `AMPL `_ -* ANTLR -* APL -* AppleScript -* Assembly (various) -* Asymptote -* `Augeas `_ -* AutoIt -* Awk -* BBC Basic -* Befunge -* BlitzBasic -* `Boa `_ -* Boo -* `Boogie `_ -* BrainFuck -* C, C++ (incl. dialects like Arduino) -* C# -* `Chapel `_ -* `Charm++ CI `_ -* Cirru -* Clay -* `Clean `_ -* Clojure -* CoffeeScript -* ColdFusion -* Common Lisp -* Component Pascal -* `Coq `_ -* Croc (MiniD) -* Cryptol (incl. Literate Cryptol) -* `Crystal `_ -* Cypher -* `Cython `_ -* `D `_ -* Dart -* DCPU-16 -* Delphi -* Dylan (incl. console) -* Eiffel -* `Elm `_ -* Emacs Lisp -* Email -* Erlang (incl. shell sessions) -* `Ezhil `_ -* Factor -* Fancy -* Fantom -* `Fennel `_ -* `FloScript `_ -* Fortran -* `FreeFEM++ `_ -* F# -* GAP -* Gherkin (Cucumber) -* GLSL shaders -* `Golo `_ -* Gosu -* Groovy -* `Haskell `_ (incl. Literate Haskell) -* `Haxe `_ -* HLSL -* `HSpec `_ -* Hy -* IDL -* Idris (incl. Literate Idris) -* Igor Pro -* Io -* Jags -* Java -* JavaScript -* Jasmin -* Jcl -* `Julia `_ -* Kotlin -* Lasso (incl. templating) -* Limbo -* LiveScript -* `LLVM MIR `_ -* Logtalk -* Logos -* `Lua `_ -* Mathematica -* Matlab -* `MiniScript `_ -* Modelica -* Modula-2 -* Monkey -* `Monte `_ -* MoonScript -* Mosel -* MuPad -* NASM -* Nemerle -* NesC -* NewLISP -* Nimrod -* `Nit `_ -* Notmuch -* NuSMV -* Objective-C -* Objective-J -* Octave -* OCaml -* Opa -* OpenCOBOL -* `ParaSail `_ -* Pawn -* PHP -* `Perl 5 `_ -* Pike -* `Pony `_ -* PovRay -* PostScript -* PowerShell -* `Praat `_ -* Prolog -* `Python `_ 2.x and 3.x (incl. console sessions and - tracebacks) -* QBasic -* `Racket `_ -* `Raku `_ a.k.a. Perl 6 -* `ReasonML `_ -* `REBOL `_ -* `Red `_ -* Redcode -* Rexx -* Ride -* `Ruby `_ (incl. 
irb sessions) -* `Rust `_ -* S, S-Plus, R -* Scala -* `Scdoc `_ -* Scheme -* Scilab -* `SGF `_ -* Shell scripts (Bash, Tcsh, Fish) -* `Shen `_ -* Silver -* `Slash `_ -* `Slurm `_ -* Smalltalk -* SNOBOL -* `Snowball `_ -* `Solidity `_ -* SourcePawn -* `Stan `_ -* Standard ML -* Stata -* Swift -* Swig -* `SuperCollider `_ -* Tcl -* `Tera Term language `_ -* TypeScript -* TypoScript -* `USD `_ -* Unicon -* Urbiscript -* Vala -* VBScript -* Verilog, SystemVerilog -* VHDL -* Visual Basic.NET -* Visual FoxPro -* `Whiley `_ -* `Xtend `_ -* XQuery -* `Zeek `_ -* Zephir -* `Zig `_ - -Template languages ------------------- - -* Angular templates -* Cheetah templates -* ColdFusion -* `Django `_ / `Jinja - `_ templates -* ERB (Ruby templating) -* Evoque -* `Genshi `_ (the Trac template language) -* Handlebars -* JSP (Java Server Pages) -* Liquid -* `Myghty `_ (the HTML::Mason based framework) -* `Mako `_ (the Myghty successor) -* Slim -* `Smarty `_ templates (PHP templating) -* Tea -* `Twig `_ - -Other markup ------------- - -* Apache config files -* Apache Pig -* BBCode -* CapDL -* `Cap'n Proto `_ -* CMake -* `Csound `_ scores -* CSS -* Debian control files -* Diff files -* Dockerfiles -* DTD -* EBNF -* E-mail headers -* Extempore -* Flatline -* Gettext catalogs -* Gnuplot script -* Groff markup -* Hexdumps -* HTML -* HTTP sessions -* IDL -* Inform -* INI-style config files -* IRC logs (irssi style) -* Isabelle -* JSGF notation -* JSON, JSON-LD -* Lean theorem prover -* Lighttpd config files -* Linux kernel log (dmesg) -* LLVM assembly -* LSL scripts -* Makefiles -* MoinMoin/Trac Wiki markup -* MQL -* MySQL -* NCAR command language -* Nginx config files -* `Nix language `_ -* NSIS scripts -* Notmuch -* `PEG `_ -* POV-Ray scenes -* `Puppet `_ -* QML -* Ragel -* Redcode -* ReST -* `Roboconf `_ -* Robot Framework -* RPM spec files -* Rql -* RSL -* Scdoc -* Sieve -* SPARQL -* SQL, also MySQL, SQLite -* Squid configuration -* TADS 3 -* Terraform -* TeX -* `Thrift `_ -* `TOML `_ -* Treetop grammars -* USD (Universal Scene Description) -* Varnish configs -* VGL -* Vim Script -* WDiff -* Web IDL -* Windows batch files -* XML -* XSLT -* YAML -* Windows Registry files - -... that's all? ---------------- - -Well, why not write your own? Contributing to Pygments is easy and fun. Take a -look at the :doc:`docs on lexer development `. Pull -requests are welcome on `GitHub `_. - -Note: the languages listed here are supported in the development version. The -latest release may lack a few of them. +:orphan: + +Supported languages +=================== + +Pygments supports an ever-growing range of languages. Watch this space... + +Programming languages +--------------------- + +* `ActionScript `_ +* `Ada `_ +* `Agda `_ (incl. literate) +* `Alloy `_ +* `AMPL `_ +* `ANTLR `_ +* `APL `_ +* `AppleScript `_ +* `Assembly `_ (various) +* `Asymptote `_ +* `Augeas `_ +* `AutoIt `_ +* `Awk `_ +* `BARE `_ +* `BBC Basic `_ +* `Befunge `_ +* `BlitzBasic `_ +* `Boa `_ +* `Boo `_ +* `Boogie `_ +* `BrainFuck `_ +* `C `_, `C++ `_ (incl. dialects like Arduino) +* `C# `_ +* `Chapel `_ +* `Charm++ CI `_ +* `Cirru `_ +* `Clay `_ +* `Clean `_ +* `Clojure `_ +* `CoffeeScript `_ +* `ColdFusion `_ +* `Common Lisp `_ +* `Component Pascal `_ +* `Coq `_ +* `Croc `_ (MiniD) +* `Cryptol `_ (incl. Literate Cryptol) +* `Crystal `_ +* `Cypher `_ +* `Cython `_ +* `D `_ +* `Dart `_ +* DCPU-16 +* `Delphi `_ +* `Devicetree `_ +* `Dylan `_ (incl. console) +* `Eiffel `_ +* `Elm `_ +* `Emacs Lisp `_ +* Email +* `Erlang `_ (incl. 
shell sessions) +* `Ezhil `_ +* `Execline `_ +* `Factor `_ +* `Fancy `_ +* `Fantom `_ +* `Fennel `_ +* `FloScript `_ +* `Fortran `_ +* `FreeFEM++ `_ +* `F# `_ +* `F* `_ +* `GAP `_ +* `GDScript `_ +* `Gherkin `_ (Cucumber) +* `GLSL `_ shaders +* `GnuCOBOL `_ (OpenCOBOL) +* `Golo `_ +* `Gosu `_ +* `Groovy `_ +* `Haskell `_ (incl. Literate Haskell) +* `Haxe `_ +* `HLSL `_ shaders +* `HSpec `_ +* `Hy `_ +* `IDL `_ +* `Idris `_ (incl. Literate Idris) +* `Igor Pro `_ +* `Io `_ +* `Jags `_ +* `Java `_ +* `JavaScript `_ +* `Jasmin `_ +* `Jcl `_ +* `Julia `_ +* `Kotlin `_ +* `Lasso `_ (incl. templating) +* `Limbo `_ +* `LiveScript `_ +* `LLVM MIR `_ +* `Logtalk `_ +* `Logos `_ +* `Lua `_ +* `Mathematica `_ +* `Matlab `_ +* `MiniScript `_ +* `Modelica `_ +* `Modula-2 `_ +* `Monkey `_ +* `Monte `_ +* `MoonScript `_ +* `Mosel `_ +* `MuPad `_ +* `NASM `_ +* `Nemerle `_ +* `NesC `_ +* `NewLISP `_ +* `Nim `_ +* `Nit `_ +* `Notmuch `_ +* `NuSMV `_ +* `Objective-C `_ +* `Objective-J `_ +* `Octave `_ +* `OCaml `_ +* `Opa `_ +* `ParaSail `_ +* `Pawn `_ +* `PHP `_ +* `Perl 5 `_ +* `Pike `_ +* `Pointless `_ +* `Pony `_ +* `PovRay `_ +* `PostScript `_ +* `PowerShell `_ +* `Praat `_ +* `Prolog `_ +* `Python `_ 2.x and 3.x (incl. console sessions and + tracebacks) +* `QBasic `_ +* `Racket `_ +* `Raku `_ a.k.a. Perl 6 +* `ReasonML `_ +* `REBOL `_ +* `Red `_ +* `Redcode `_ +* `Rexx `_ +* `Ride `_ +* `Ruby `_ (incl. irb sessions) +* `Rust `_ +* S, S-Plus, `R `_ +* `Scala `_ +* `Scdoc `_ +* `Scheme `_ +* `Scilab `_ +* `SGF `_ +* Shell scripts (`Bash `_, `Tcsh `_, `Fish `_) +* `Shen `_ +* `Silver `_ +* `Slash `_ +* `Slurm `_ +* `Smalltalk `_ +* `SNOBOL `_ +* `Snowball `_ +* `Solidity `_ +* `SourcePawn `_ +* `Stan `_ +* `Standard ML `_ +* `Stata `_ +* `Swift `_ +* `Swig `_ +* `SuperCollider `_ +* `Tcl `_ +* `Tera Term language `_ +* `TypeScript `_ +* `TypoScript `_ +* `USD `_ +* `Unicon `_ +* `Urbiscript `_ +* `Vala `_ +* `VBScript `_ +* Verilog, `SystemVerilog `_ +* `VHDL `_ +* `Visual Basic.NET `_ +* `Visual FoxPro `_ +* `Whiley `_ +* `Xtend `_ +* `XQuery `_ +* `Zeek `_ +* `Zephir `_ +* `Zig `_ + +Template languages +------------------ + +* `Angular templates `_ +* `Cheetah templates `_ +* `ColdFusion `_ +* `Django `_ / `Jinja + `_ templates +* `ERB `_ (Ruby templating) +* Evoque +* `Genshi `_ (the Trac template language) +* `Handlebars `_ +* `JSP `_ (Java Server Pages) +* `Liquid `_ +* `Myghty `_ (the HTML::Mason based framework) +* `Mako `_ (the Myghty successor) +* `Slim `_ +* `Smarty `_ templates (PHP templating) +* `Tea `_ +* `Twig `_ + +Other markup +------------ + +* Apache config files +* Apache Pig +* BBCode +* CapDL +* `Cap'n Proto `_ +* CMake +* `Csound `_ scores +* CSS +* Debian control files +* Diff files +* Dockerfiles +* DTD +* EBNF +* E-mail headers +* Extempore +* Flatline +* Gettext catalogs +* Gnuplot script +* Groff markup +* Hexdumps +* HTML +* HTTP sessions +* IDL +* Inform +* INI-style config files +* IRC logs (irssi style) +* Isabelle +* JSGF notation +* JSON, JSON-LD +* Lean theorem prover +* Lighttpd config files +* Linux kernel log (dmesg) +* LLVM assembly +* LSL scripts +* Makefiles +* MoinMoin/Trac Wiki markup +* MQL +* MySQL +* NCAR command language +* Nginx config files +* `Nix language `_ +* NSIS scripts +* Notmuch +* `PEG `_ +* POV-Ray scenes +* `PromQL `_ +* `Puppet `_ +* QML +* Ragel +* Redcode +* ReST +* `Roboconf `_ +* Robot Framework +* RPM spec files +* Rql +* RSL +* Scdoc +* Sieve +* Singularity +* SPARQL +* SQL, also MySQL, SQLite +* Squid configuration +* TADS 3 +* Terraform 
+* TeX +* `Thrift `_ +* `TNT `_ +* `TOML `_ +* Treetop grammars +* USD (Universal Scene Description) +* Varnish configs +* VGL +* Vim Script +* WDiff +* Web IDL +* Windows batch files +* XML +* XSLT +* YAML +* YANG +* Windows Registry files + + +Interactive terminal/shell sessions +----------------------------------- + +To highlight an interactive terminal or shell session, prefix your code snippet +with a specially formatted prompt. + +Supported shells with examples are shown below. In each example, prompt parts in +brackets ``[any]`` represent optional parts of the prompt, and prompt parts +without brackets or in parenthesis ``(any)`` represent required parts of the +prompt. + +* **Bash Session** (console, shell-session): + + .. code-block:: console + + [any@any]$ ls -lh + [any@any]# ls -lh + [any@any]% ls -lh + $ ls -lh + # ls -lh + % ls -lh + > ls -lh + +* **MSDOS Session** (doscon): + + .. code-block:: doscon + + [any]> dir + > dir + More? dir + +* **Tcsh Session** (tcshcon): + + .. code-block:: tcshcon + + (any)> ls -lh + ? ls -lh + +* **PowerShell Session** (ps1con): + + .. code-block:: ps1con + + PS[any]> Get-ChildItem + PS> Get-ChildItem + >> Get-ChildItem + + +... that's all? +--------------- + +Well, why not write your own? Contributing to Pygments is easy and fun. Take a +look at the :doc:`docs on lexer development `. Pull +requests are welcome on `GitHub `_. + +Note: the languages listed here are supported in the development version. The +latest release may lack a few of them. diff --git a/doc/make.bat b/doc/make.bat old mode 100644 new mode 100755 index 8803c98..4ed3235 --- a/doc/make.bat +++ b/doc/make.bat @@ -1,190 +1,190 @@ -@ECHO OFF - -REM Command file for Sphinx documentation - -if "%SPHINXBUILD%" == "" ( - set SPHINXBUILD=sphinx-build -) -set BUILDDIR=_build -set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . -set I18NSPHINXOPTS=%SPHINXOPTS% . -if NOT "%PAPER%" == "" ( - set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% - set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% -) - -if "%1" == "" goto help - -if "%1" == "help" ( - :help - echo.Please use `make ^` where ^ is one of - echo. html to make standalone HTML files - echo. dirhtml to make HTML files named index.html in directories - echo. singlehtml to make a single large HTML file - echo. pickle to make pickle files - echo. json to make JSON files - echo. htmlhelp to make HTML files and a HTML help project - echo. qthelp to make HTML files and a qthelp project - echo. devhelp to make HTML files and a Devhelp project - echo. epub to make an epub - echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter - echo. text to make text files - echo. man to make manual pages - echo. texinfo to make Texinfo files - echo. gettext to make PO message catalogs - echo. changes to make an overview over all changed/added/deprecated items - echo. linkcheck to check all external links for integrity - echo. doctest to run all doctests embedded in the documentation if enabled - goto end -) - -if "%1" == "clean" ( - for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i - del /q /s %BUILDDIR%\* - goto end -) - -if "%1" == "html" ( - %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The HTML pages are in %BUILDDIR%/html. - goto end -) - -if "%1" == "dirhtml" ( - %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. 
- goto end -) - -if "%1" == "singlehtml" ( - %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. - goto end -) - -if "%1" == "pickle" ( - %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle - if errorlevel 1 exit /b 1 - echo. - echo.Build finished; now you can process the pickle files. - goto end -) - -if "%1" == "json" ( - %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json - if errorlevel 1 exit /b 1 - echo. - echo.Build finished; now you can process the JSON files. - goto end -) - -if "%1" == "htmlhelp" ( - %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp - if errorlevel 1 exit /b 1 - echo. - echo.Build finished; now you can run HTML Help Workshop with the ^ -.hhp project file in %BUILDDIR%/htmlhelp. - goto end -) - -if "%1" == "qthelp" ( - %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp - if errorlevel 1 exit /b 1 - echo. - echo.Build finished; now you can run "qcollectiongenerator" with the ^ -.qhcp project file in %BUILDDIR%/qthelp, like this: - echo.^> qcollectiongenerator %BUILDDIR%\qthelp\Pygments.qhcp - echo.To view the help file: - echo.^> assistant -collectionFile %BUILDDIR%\qthelp\Pygments.ghc - goto end -) - -if "%1" == "devhelp" ( - %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. - goto end -) - -if "%1" == "epub" ( - %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The epub file is in %BUILDDIR%/epub. - goto end -) - -if "%1" == "latex" ( - %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex - if errorlevel 1 exit /b 1 - echo. - echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. - goto end -) - -if "%1" == "text" ( - %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The text files are in %BUILDDIR%/text. - goto end -) - -if "%1" == "man" ( - %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The manual pages are in %BUILDDIR%/man. - goto end -) - -if "%1" == "texinfo" ( - %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. - goto end -) - -if "%1" == "gettext" ( - %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale - if errorlevel 1 exit /b 1 - echo. - echo.Build finished. The message catalogs are in %BUILDDIR%/locale. - goto end -) - -if "%1" == "changes" ( - %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes - if errorlevel 1 exit /b 1 - echo. - echo.The overview file is in %BUILDDIR%/changes. - goto end -) - -if "%1" == "linkcheck" ( - %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck - if errorlevel 1 exit /b 1 - echo. - echo.Link check complete; look for any errors in the above output ^ -or in %BUILDDIR%/linkcheck/output.txt. - goto end -) - -if "%1" == "doctest" ( - %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest - if errorlevel 1 exit /b 1 - echo. - echo.Testing of doctests in the sources finished, look at the ^ -results in %BUILDDIR%/doctest/output.txt. - goto end -) - -:end +@ECHO OFF + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set BUILDDIR=_build +set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . 
+set I18NSPHINXOPTS=%SPHINXOPTS% . +if NOT "%PAPER%" == "" ( + set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% + set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% +) + +if "%1" == "" goto help + +if "%1" == "help" ( + :help + echo.Please use `make ^` where ^ is one of + echo. html to make standalone HTML files + echo. dirhtml to make HTML files named index.html in directories + echo. singlehtml to make a single large HTML file + echo. pickle to make pickle files + echo. json to make JSON files + echo. htmlhelp to make HTML files and a HTML help project + echo. qthelp to make HTML files and a qthelp project + echo. devhelp to make HTML files and a Devhelp project + echo. epub to make an epub + echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter + echo. text to make text files + echo. man to make manual pages + echo. texinfo to make Texinfo files + echo. gettext to make PO message catalogs + echo. changes to make an overview over all changed/added/deprecated items + echo. linkcheck to check all external links for integrity + echo. doctest to run all doctests embedded in the documentation if enabled + goto end +) + +if "%1" == "clean" ( + for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i + del /q /s %BUILDDIR%\* + goto end +) + +if "%1" == "html" ( + %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The HTML pages are in %BUILDDIR%/html. + goto end +) + +if "%1" == "dirhtml" ( + %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. + goto end +) + +if "%1" == "singlehtml" ( + %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. + goto end +) + +if "%1" == "pickle" ( + %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; now you can process the pickle files. + goto end +) + +if "%1" == "json" ( + %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; now you can process the JSON files. + goto end +) + +if "%1" == "htmlhelp" ( + %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; now you can run HTML Help Workshop with the ^ +.hhp project file in %BUILDDIR%/htmlhelp. + goto end +) + +if "%1" == "qthelp" ( + %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; now you can run "qcollectiongenerator" with the ^ +.qhcp project file in %BUILDDIR%/qthelp, like this: + echo.^> qcollectiongenerator %BUILDDIR%\qthelp\Pygments.qhcp + echo.To view the help file: + echo.^> assistant -collectionFile %BUILDDIR%\qthelp\Pygments.ghc + goto end +) + +if "%1" == "devhelp" ( + %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. + goto end +) + +if "%1" == "epub" ( + %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The epub file is in %BUILDDIR%/epub. + goto end +) + +if "%1" == "latex" ( + %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. 
+ goto end +) + +if "%1" == "text" ( + %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The text files are in %BUILDDIR%/text. + goto end +) + +if "%1" == "man" ( + %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The manual pages are in %BUILDDIR%/man. + goto end +) + +if "%1" == "texinfo" ( + %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. + goto end +) + +if "%1" == "gettext" ( + %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The message catalogs are in %BUILDDIR%/locale. + goto end +) + +if "%1" == "changes" ( + %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes + if errorlevel 1 exit /b 1 + echo. + echo.The overview file is in %BUILDDIR%/changes. + goto end +) + +if "%1" == "linkcheck" ( + %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck + if errorlevel 1 exit /b 1 + echo. + echo.Link check complete; look for any errors in the above output ^ +or in %BUILDDIR%/linkcheck/output.txt. + goto end +) + +if "%1" == "doctest" ( + %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest + if errorlevel 1 exit /b 1 + echo. + echo.Testing of doctests in the sources finished, look at the ^ +results in %BUILDDIR%/doctest/output.txt. + goto end +) + +:end diff --git a/doc/pygmentize.1 b/doc/pygmentize.1 old mode 100644 new mode 100755 index 71bb6f9..651059d --- a/doc/pygmentize.1 +++ b/doc/pygmentize.1 @@ -1,94 +1,94 @@ -.TH PYGMENTIZE 1 "February 15, 2007" - -.SH NAME -pygmentize \- highlights the input file - -.SH SYNOPSIS -.B \fBpygmentize\fP -.RI [-l\ \fI\fP]\ [-F\ \fI\fP[:\fI\fP]]\ [-f\ \fI\fP] -.RI [-O\ \fI\fP]\ [-P\ \fI\fP]\ [-o\ \fI\fP]\ [\fI\fP] -.br -.B \fBpygmentize\fP -.RI -S\ \fI - - -

%(title)s

- -''' - -DOC_HEADER_EXTERNALCSS = '''\ - - - - - %(title)s - - - - -

%(title)s

- -''' - -DOC_FOOTER = '''\ - - -''' - - -class HtmlFormatter(Formatter): - r""" - Format tokens as HTML 4 ```` tags within a ``
`` tag, wrapped
-    in a ``
`` tag. The ``
``'s CSS class can be set by the `cssclass` - option. - - If the `linenos` option is set to ``"table"``, the ``
`` is
-    additionally wrapped inside a ```` which has one row and two
-    cells: one containing the line numbers and one containing the code.
-    Example:
-
-    .. sourcecode:: html
-
-        
-
- - -
-
1
-            2
-
-
def foo(bar):
-              pass
-            
-
- - (whitespace added to improve clarity). - - Wrapping can be disabled using the `nowrap` option. - - A list of lines can be specified using the `hl_lines` option to make these - lines highlighted (as of Pygments 0.11). - - With the `full` option, a complete HTML 4 document is output, including - the style definitions inside a `` + + +

%(title)s

+ +''' + +DOC_HEADER_EXTERNALCSS = '''\ + + + + + %(title)s + + + + +

%(title)s

+ +''' + +DOC_FOOTER = '''\ + + +''' + + +class HtmlFormatter(Formatter): + r""" + Format tokens as HTML 4 ```` tags within a ``
`` tag, wrapped
+    in a ``
`` tag. The ``
``'s CSS class can be set by the `cssclass` + option. + + If the `linenos` option is set to ``"table"``, the ``
`` is
+    additionally wrapped inside a ```` which has one row and two
+    cells: one containing the line numbers and one containing the code.
+    Example:
+
+    .. sourcecode:: html
+
+        
+
+ + +
+
1
+            2
+
+
def foo(bar):
+              pass
+            
+
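A brief usage sketch tying the options above together (illustrative values,
not part of the original docstring):

.. sourcecode:: python

    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import HtmlFormatter

    # linenos='table' produces the two-cell table wrapping shown above;
    # hl_lines marks line 2; cssclass names the outer div.
    formatter = HtmlFormatter(linenos='table', hl_lines=[2],
                              cssclass='highlight')
    html = highlight('def foo(bar):\n    pass\n', PythonLexer(), formatter)
    # get_style_defs() returns the CSS rules for the chosen selector.
    css = formatter.get_style_defs('.highlight')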
+ + (whitespace added to improve clarity). + + Wrapping can be disabled using the `nowrap` option. + + A list of lines can be specified using the `hl_lines` option to make these + lines highlighted (as of Pygments 0.11). + + With the `full` option, a complete HTML 4 document is output, including + the style definitions inside a ``$)', _handle_cssblock), + + include('keywords'), + include('inline'), + ], + 'keywords': [ + (words(( + '\\define', '\\end', 'caption', 'created', 'modified', 'tags', + 'title', 'type'), prefix=r'^', suffix=r'\b'), + Keyword), + ], + 'inline': [ + # escape + (r'\\.', Text), + # created or modified date + (r'\d{17}', Number.Integer), + # italics + (r'(\s)(//[^/]+//)((?=\W|\n))', + bygroups(Text, Generic.Emph, Text)), + # superscript + (r'(\s)(\^\^[^\^]+\^\^)', bygroups(Text, Generic.Emph)), + # subscript + (r'(\s)(,,[^,]+,,)', bygroups(Text, Generic.Emph)), + # underscore + (r'(\s)(__[^_]+__)', bygroups(Text, Generic.Strong)), + # bold + (r"(\s)(''[^']+'')((?=\W|\n))", + bygroups(Text, Generic.Strong, Text)), + # strikethrough + (r'(\s)(~~[^~]+~~)((?=\W|\n))', + bygroups(Text, Generic.Deleted, Text)), + # TiddlyWiki variables + (r'<<[^>]+>>', Name.Tag), + (r'\$\$[^$]+\$\$', Name.Tag), + (r'\$\([^)]+\)\$', Name.Tag), + # TiddlyWiki style or class + (r'^@@.*$', Name.Tag), + # HTML tags + (r']+>', Name.Tag), + # inline code + (r'`[^`]+`', String.Backtick), + # HTML escaped symbols + (r'&\S*?;', String.Regex), + # Wiki links + (r'(\[{2})([^]\|]+)(\]{2})', bygroups(Text, Name.Tag, Text)), + # External links + (r'(\[{2})([^]\|]+)(\|)([^]\|]+)(\]{2})', + bygroups(Text, Name.Tag, Text, Name.Attribute, Text)), + # Transclusion + (r'(\{{2})([^}]+)(\}{2})', bygroups(Text, Name.Tag, Text)), + # URLs + (r'(\b.?.?tps?://[^\s"]+)', bygroups(Name.Attribute)), + + # general text, must come last! + (r'[\w]+', Text), + (r'.', Text) + ], + } + + def __init__(self, **options): + self.handlecodeblocks = get_bool_opt(options, 'handlecodeblocks', True) + RegexLexer.__init__(self, **options) diff --git a/pygments/lexers/math.py b/pygments/lexers/math.py old mode 100644 new mode 100755 index 7311508..ad9720b --- a/pygments/lexers/math.py +++ b/pygments/lexers/math.py @@ -1,21 +1,21 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.math - ~~~~~~~~~~~~~~~~~~~~ - - Just export lexers that were contained in this module. - - :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -from pygments.lexers.python import NumPyLexer -from pygments.lexers.matlab import MatlabLexer, MatlabSessionLexer, \ - OctaveLexer, ScilabLexer -from pygments.lexers.julia import JuliaLexer, JuliaConsoleLexer -from pygments.lexers.r import RConsoleLexer, SLexer, RdLexer -from pygments.lexers.modeling import BugsLexer, JagsLexer, StanLexer -from pygments.lexers.idl import IDLLexer -from pygments.lexers.algebra import MuPADLexer - -__all__ = [] +# -*- coding: utf-8 -*- +""" + pygments.lexers.math + ~~~~~~~~~~~~~~~~~~~~ + + Just export lexers that were contained in this module. + + :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +from pygments.lexers.python import NumPyLexer +from pygments.lexers.matlab import MatlabLexer, MatlabSessionLexer, \ + OctaveLexer, ScilabLexer +from pygments.lexers.julia import JuliaLexer, JuliaConsoleLexer +from pygments.lexers.r import RConsoleLexer, SLexer, RdLexer +from pygments.lexers.modeling import BugsLexer, JagsLexer, StanLexer +from pygments.lexers.idl import IDLLexer +from pygments.lexers.algebra import MuPADLexer + +__all__ = [] diff --git a/pygments/lexers/matlab.py b/pygments/lexers/matlab.py old mode 100644 new mode 100755 index d39f1ff..63eba88 --- a/pygments/lexers/matlab.py +++ b/pygments/lexers/matlab.py @@ -1,692 +1,716 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.matlab - ~~~~~~~~~~~~~~~~~~~~~~ - - Lexers for Matlab and related languages. - - :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -import re - -from pygments.lexer import Lexer, RegexLexer, bygroups, words, do_insertions -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ - Number, Punctuation, Generic, Whitespace - -from pygments.lexers import _scilab_builtins - -__all__ = ['MatlabLexer', 'MatlabSessionLexer', 'OctaveLexer', 'ScilabLexer'] - - -class MatlabLexer(RegexLexer): - """ - For Matlab source code. - - .. versionadded:: 0.10 - """ - name = 'Matlab' - aliases = ['matlab'] - filenames = ['*.m'] - mimetypes = ['text/matlab'] - - # - # These lists are generated automatically. - # Run the following in bash shell: - # - # for f in elfun specfun elmat; do - # echo -n "$f = " - # matlab -nojvm -r "help $f;exit;" | perl -ne \ - # 'push(@c,$1) if /^ (\w+)\s+-/; END {print q{["}.join(q{","},@c).qq{"]\n};}' - # done - # - # elfun: Elementary math functions - # specfun: Special Math functions - # elmat: Elementary matrices and matrix manipulation - # - # taken from Matlab version 7.4.0.336 (R2007a) - # - elfun = ("sin", "sind", "sinh", "asin", "asind", "asinh", "cos", "cosd", "cosh", - "acos", "acosd", "acosh", "tan", "tand", "tanh", "atan", "atand", "atan2", - "atanh", "sec", "secd", "sech", "asec", "asecd", "asech", "csc", "cscd", - "csch", "acsc", "acscd", "acsch", "cot", "cotd", "coth", "acot", "acotd", - "acoth", "hypot", "exp", "expm1", "log", "log1p", "log10", "log2", "pow2", - "realpow", "reallog", "realsqrt", "sqrt", "nthroot", "nextpow2", "abs", - "angle", "complex", "conj", "imag", "real", "unwrap", "isreal", "cplxpair", - "fix", "floor", "ceil", "round", "mod", "rem", "sign") - specfun = ("airy", "besselj", "bessely", "besselh", "besseli", "besselk", "beta", - "betainc", "betaln", "ellipj", "ellipke", "erf", "erfc", "erfcx", - "erfinv", "expint", "gamma", "gammainc", "gammaln", "psi", "legendre", - "cross", "dot", "factor", "isprime", "primes", "gcd", "lcm", "rat", - "rats", "perms", "nchoosek", "factorial", "cart2sph", "cart2pol", - "pol2cart", "sph2cart", "hsv2rgb", "rgb2hsv") - elmat = ("zeros", "ones", "eye", "repmat", "rand", "randn", "linspace", "logspace", - "freqspace", "meshgrid", "accumarray", "size", "length", "ndims", "numel", - "disp", "isempty", "isequal", "isequalwithequalnans", "cat", "reshape", - "diag", "blkdiag", "tril", "triu", "fliplr", "flipud", "flipdim", "rot90", - "find", "end", "sub2ind", "ind2sub", "bsxfun", "ndgrid", "permute", - "ipermute", "shiftdim", "circshift", "squeeze", "isscalar", "isvector", - "ans", "eps", "realmax", "realmin", "pi", "i", "inf", "nan", "isnan", - "isinf", "isfinite", "j", "why", "compan", "gallery", "hadamard", "hankel", - "hilb", "invhilb", 
"magic", "pascal", "rosser", "toeplitz", "vander", - "wilkinson") - - _operators = r'-|==|~=|<=|>=|<|>|&&|&|~|\|\|?|\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\' - - tokens = { - 'root': [ - # line starting with '!' is sent as a system command. not sure what - # label to use... - (r'^!.*', String.Other), - (r'%\{\s*\n', Comment.Multiline, 'blockcomment'), - (r'%.*$', Comment), - (r'^\s*function\b', Keyword, 'deffunc'), - - # from 'iskeyword' on version 7.11 (R2010): - # Check that there is no preceding dot, as keywords are valid field - # names. - (words(('break', 'case', 'catch', 'classdef', 'continue', 'else', - 'elseif', 'end', 'enumerated', 'events', 'for', 'function', - 'global', 'if', 'methods', 'otherwise', 'parfor', - 'persistent', 'properties', 'return', 'spmd', 'switch', - 'try', 'while'), - prefix=r'(?. - - .. versionadded:: 0.10 - """ - name = 'Matlab session' - aliases = ['matlabsession'] - - def get_tokens_unprocessed(self, text): - mlexer = MatlabLexer(**self.options) - - curcode = '' - insertions = [] - - for match in line_re.finditer(text): - line = match.group() - - if line.startswith('>> '): - insertions.append((len(curcode), - [(0, Generic.Prompt, line[:3])])) - curcode += line[3:] - - elif line.startswith('>>'): - insertions.append((len(curcode), - [(0, Generic.Prompt, line[:2])])) - curcode += line[2:] - - elif line.startswith('???'): - - idx = len(curcode) - - # without is showing error on same line as before...? - # line = "\n" + line - token = (0, Generic.Traceback, line) - insertions.append((idx, [token])) - - else: - if curcode: - for item in do_insertions( - insertions, mlexer.get_tokens_unprocessed(curcode)): - yield item - curcode = '' - insertions = [] - - yield match.start(), Generic.Output, line - - if curcode: # or item: - for item in do_insertions( - insertions, mlexer.get_tokens_unprocessed(curcode)): - yield item - - -class OctaveLexer(RegexLexer): - """ - For GNU Octave source code. - - .. versionadded:: 1.5 - """ - name = 'Octave' - aliases = ['octave'] - filenames = ['*.m'] - mimetypes = ['text/octave'] - - # These lists are generated automatically. 
- # Run the following in bash shell: - # - # First dump all of the Octave manual into a plain text file: - # - # $ info octave --subnodes -o octave-manual - # - # Now grep through it: - - # for i in \ - # "Built-in Function" "Command" "Function File" \ - # "Loadable Function" "Mapping Function"; - # do - # perl -e '@name = qw('"$i"'); - # print lc($name[0]),"_kw = [\n"'; - # - # perl -n -e 'print "\"$1\",\n" if /-- '"$i"': .* (\w*) \(/;' \ - # octave-manual | sort | uniq ; - # echo "]" ; - # echo; - # done - - # taken from Octave Mercurial changeset 8cc154f45e37 (30-jan-2011) - - builtin_kw = ( - "addlistener", "addpath", "addproperty", "all", - "and", "any", "argnames", "argv", "assignin", - "atexit", "autoload", - "available_graphics_toolkits", "beep_on_error", - "bitand", "bitmax", "bitor", "bitshift", "bitxor", - "cat", "cell", "cellstr", "char", "class", "clc", - "columns", "command_line_path", - "completion_append_char", "completion_matches", - "complex", "confirm_recursive_rmdir", "cputime", - "crash_dumps_octave_core", "ctranspose", "cumprod", - "cumsum", "debug_on_error", "debug_on_interrupt", - "debug_on_warning", "default_save_options", - "dellistener", "diag", "diff", "disp", - "doc_cache_file", "do_string_escapes", "double", - "drawnow", "e", "echo_executing_commands", "eps", - "eq", "errno", "errno_list", "error", "eval", - "evalin", "exec", "exist", "exit", "eye", "false", - "fclear", "fclose", "fcntl", "fdisp", "feof", - "ferror", "feval", "fflush", "fgetl", "fgets", - "fieldnames", "file_in_loadpath", "file_in_path", - "filemarker", "filesep", "find_dir_in_path", - "fixed_point_format", "fnmatch", "fopen", "fork", - "formula", "fprintf", "fputs", "fread", "freport", - "frewind", "fscanf", "fseek", "fskipl", "ftell", - "functions", "fwrite", "ge", "genpath", "get", - "getegid", "getenv", "geteuid", "getgid", - "getpgrp", "getpid", "getppid", "getuid", "glob", - "gt", "gui_mode", "history_control", - "history_file", "history_size", - "history_timestamp_format_string", "home", - "horzcat", "hypot", "ifelse", - "ignore_function_time_stamp", "inferiorto", - "info_file", "info_program", "inline", "input", - "intmax", "intmin", "ipermute", - "is_absolute_filename", "isargout", "isbool", - "iscell", "iscellstr", "ischar", "iscomplex", - "isempty", "isfield", "isfloat", "isglobal", - "ishandle", "isieee", "isindex", "isinteger", - "islogical", "ismatrix", "ismethod", "isnull", - "isnumeric", "isobject", "isreal", - "is_rooted_relative_filename", "issorted", - "isstruct", "isvarname", "kbhit", "keyboard", - "kill", "lasterr", "lasterror", "lastwarn", - "ldivide", "le", "length", "link", "linspace", - "logical", "lstat", "lt", "make_absolute_filename", - "makeinfo_program", "max_recursion_depth", "merge", - "methods", "mfilename", "minus", "mislocked", - "mkdir", "mkfifo", "mkstemp", "mldivide", "mlock", - "mouse_wheel_zoom", "mpower", "mrdivide", "mtimes", - "munlock", "nargin", "nargout", - "native_float_format", "ndims", "ne", "nfields", - "nnz", "norm", "not", "numel", "nzmax", - "octave_config_info", "octave_core_file_limit", - "octave_core_file_name", - "octave_core_file_options", "ones", "or", - "output_max_field_width", "output_precision", - "page_output_immediately", "page_screen_output", - "path", "pathsep", "pause", "pclose", "permute", - "pi", "pipe", "plus", "popen", "power", - "print_empty_dimensions", "printf", - "print_struct_array_contents", "prod", - "program_invocation_name", "program_name", - "putenv", "puts", "pwd", "quit", "rats", "rdivide", - "readdir", 
"readlink", "read_readline_init_file", - "realmax", "realmin", "rehash", "rename", - "repelems", "re_read_readline_init_file", "reset", - "reshape", "resize", "restoredefaultpath", - "rethrow", "rmdir", "rmfield", "rmpath", "rows", - "save_header_format_string", "save_precision", - "saving_history", "scanf", "set", "setenv", - "shell_cmd", "sighup_dumps_octave_core", - "sigterm_dumps_octave_core", "silent_functions", - "single", "size", "size_equal", "sizemax", - "sizeof", "sleep", "source", "sparse_auto_mutate", - "split_long_rows", "sprintf", "squeeze", "sscanf", - "stat", "stderr", "stdin", "stdout", "strcmp", - "strcmpi", "string_fill_char", "strncmp", - "strncmpi", "struct", "struct_levels_to_print", - "strvcat", "subsasgn", "subsref", "sum", "sumsq", - "superiorto", "suppress_verbose_help_message", - "symlink", "system", "tic", "tilde_expand", - "times", "tmpfile", "tmpnam", "toc", "toupper", - "transpose", "true", "typeinfo", "umask", "uminus", - "uname", "undo_string_escapes", "unlink", "uplus", - "upper", "usage", "usleep", "vec", "vectorize", - "vertcat", "waitpid", "warning", "warranty", - "whos_line_format", "yes_or_no", "zeros", - "inf", "Inf", "nan", "NaN") - - command_kw = ("close", "load", "who", "whos") - - function_kw = ( - "accumarray", "accumdim", "acosd", "acotd", - "acscd", "addtodate", "allchild", "ancestor", - "anova", "arch_fit", "arch_rnd", "arch_test", - "area", "arma_rnd", "arrayfun", "ascii", "asctime", - "asecd", "asind", "assert", "atand", - "autoreg_matrix", "autumn", "axes", "axis", "bar", - "barh", "bartlett", "bartlett_test", "beep", - "betacdf", "betainv", "betapdf", "betarnd", - "bicgstab", "bicubic", "binary", "binocdf", - "binoinv", "binopdf", "binornd", "bitcmp", - "bitget", "bitset", "blackman", "blanks", - "blkdiag", "bone", "box", "brighten", "calendar", - "cast", "cauchy_cdf", "cauchy_inv", "cauchy_pdf", - "cauchy_rnd", "caxis", "celldisp", "center", "cgs", - "chisquare_test_homogeneity", - "chisquare_test_independence", "circshift", "cla", - "clabel", "clf", "clock", "cloglog", "closereq", - "colon", "colorbar", "colormap", "colperm", - "comet", "common_size", "commutation_matrix", - "compan", "compare_versions", "compass", - "computer", "cond", "condest", "contour", - "contourc", "contourf", "contrast", "conv", - "convhull", "cool", "copper", "copyfile", "cor", - "corrcoef", "cor_test", "cosd", "cotd", "cov", - "cplxpair", "cross", "cscd", "cstrcat", "csvread", - "csvwrite", "ctime", "cumtrapz", "curl", "cut", - "cylinder", "date", "datenum", "datestr", - "datetick", "datevec", "dblquad", "deal", - "deblank", "deconv", "delaunay", "delaunayn", - "delete", "demo", "detrend", "diffpara", "diffuse", - "dir", "discrete_cdf", "discrete_inv", - "discrete_pdf", "discrete_rnd", "display", - "divergence", "dlmwrite", "dos", "dsearch", - "dsearchn", "duplication_matrix", "durbinlevinson", - "ellipsoid", "empirical_cdf", "empirical_inv", - "empirical_pdf", "empirical_rnd", "eomday", - "errorbar", "etime", "etreeplot", "example", - "expcdf", "expinv", "expm", "exppdf", "exprnd", - "ezcontour", "ezcontourf", "ezmesh", "ezmeshc", - "ezplot", "ezpolar", "ezsurf", "ezsurfc", "factor", - "factorial", "fail", "fcdf", "feather", "fftconv", - "fftfilt", "fftshift", "figure", "fileattrib", - "fileparts", "fill", "findall", "findobj", - "findstr", "finv", "flag", "flipdim", "fliplr", - "flipud", "fpdf", "fplot", "fractdiff", "freqz", - "freqz_plot", "frnd", "fsolve", - "f_test_regression", "ftp", "fullfile", "fzero", - "gamcdf", "gaminv", "gampdf", "gamrnd", "gca", 
- "gcbf", "gcbo", "gcf", "genvarname", "geocdf", - "geoinv", "geopdf", "geornd", "getfield", "ginput", - "glpk", "gls", "gplot", "gradient", - "graphics_toolkit", "gray", "grid", "griddata", - "griddatan", "gtext", "gunzip", "gzip", "hadamard", - "hamming", "hankel", "hanning", "hggroup", - "hidden", "hilb", "hist", "histc", "hold", "hot", - "hotelling_test", "housh", "hsv", "hurst", - "hygecdf", "hygeinv", "hygepdf", "hygernd", - "idivide", "ifftshift", "image", "imagesc", - "imfinfo", "imread", "imshow", "imwrite", "index", - "info", "inpolygon", "inputname", "interpft", - "interpn", "intersect", "invhilb", "iqr", "isa", - "isdefinite", "isdir", "is_duplicate_entry", - "isequal", "isequalwithequalnans", "isfigure", - "ishermitian", "ishghandle", "is_leap_year", - "isletter", "ismac", "ismember", "ispc", "isprime", - "isprop", "isscalar", "issquare", "isstrprop", - "issymmetric", "isunix", "is_valid_file_id", - "isvector", "jet", "kendall", - "kolmogorov_smirnov_cdf", - "kolmogorov_smirnov_test", "kruskal_wallis_test", - "krylov", "kurtosis", "laplace_cdf", "laplace_inv", - "laplace_pdf", "laplace_rnd", "legend", "legendre", - "license", "line", "linkprop", "list_primes", - "loadaudio", "loadobj", "logistic_cdf", - "logistic_inv", "logistic_pdf", "logistic_rnd", - "logit", "loglog", "loglogerr", "logm", "logncdf", - "logninv", "lognpdf", "lognrnd", "logspace", - "lookfor", "ls_command", "lsqnonneg", "magic", - "mahalanobis", "manova", "matlabroot", - "mcnemar_test", "mean", "meansq", "median", "menu", - "mesh", "meshc", "meshgrid", "meshz", "mexext", - "mget", "mkpp", "mode", "moment", "movefile", - "mpoles", "mput", "namelengthmax", "nargchk", - "nargoutchk", "nbincdf", "nbininv", "nbinpdf", - "nbinrnd", "nchoosek", "ndgrid", "newplot", "news", - "nonzeros", "normcdf", "normest", "norminv", - "normpdf", "normrnd", "now", "nthroot", "null", - "ocean", "ols", "onenormest", "optimget", - "optimset", "orderfields", "orient", "orth", - "pack", "pareto", "parseparams", "pascal", "patch", - "pathdef", "pcg", "pchip", "pcolor", "pcr", - "peaks", "periodogram", "perl", "perms", "pie", - "pink", "planerot", "playaudio", "plot", - "plotmatrix", "plotyy", "poisscdf", "poissinv", - "poisspdf", "poissrnd", "polar", "poly", - "polyaffine", "polyarea", "polyderiv", "polyfit", - "polygcd", "polyint", "polyout", "polyreduce", - "polyval", "polyvalm", "postpad", "powerset", - "ppder", "ppint", "ppjumps", "ppplot", "ppval", - "pqpnonneg", "prepad", "primes", "print", - "print_usage", "prism", "probit", "qp", "qqplot", - "quadcc", "quadgk", "quadl", "quadv", "quiver", - "qzhess", "rainbow", "randi", "range", "rank", - "ranks", "rat", "reallog", "realpow", "realsqrt", - "record", "rectangle_lw", "rectangle_sw", - "rectint", "refresh", "refreshdata", - "regexptranslate", "repmat", "residue", "ribbon", - "rindex", "roots", "rose", "rosser", "rotdim", - "rref", "run", "run_count", "rundemos", "run_test", - "runtests", "saveas", "saveaudio", "saveobj", - "savepath", "scatter", "secd", "semilogx", - "semilogxerr", "semilogy", "semilogyerr", - "setaudio", "setdiff", "setfield", "setxor", - "shading", "shift", "shiftdim", "sign_test", - "sinc", "sind", "sinetone", "sinewave", "skewness", - "slice", "sombrero", "sortrows", "spaugment", - "spconvert", "spdiags", "spearman", "spectral_adf", - "spectral_xdf", "specular", "speed", "spencer", - "speye", "spfun", "sphere", "spinmap", "spline", - "spones", "sprand", "sprandn", "sprandsym", - "spring", "spstats", "spy", "sqp", "stairs", - "statistics", "std", 
"stdnormal_cdf", - "stdnormal_inv", "stdnormal_pdf", "stdnormal_rnd", - "stem", "stft", "strcat", "strchr", "strjust", - "strmatch", "strread", "strsplit", "strtok", - "strtrim", "strtrunc", "structfun", "studentize", - "subplot", "subsindex", "subspace", "substr", - "substruct", "summer", "surf", "surface", "surfc", - "surfl", "surfnorm", "svds", "swapbytes", - "sylvester_matrix", "symvar", "synthesis", "table", - "tand", "tar", "tcdf", "tempdir", "tempname", - "test", "text", "textread", "textscan", "tinv", - "title", "toeplitz", "tpdf", "trace", "trapz", - "treelayout", "treeplot", "triangle_lw", - "triangle_sw", "tril", "trimesh", "triplequad", - "triplot", "trisurf", "triu", "trnd", "tsearchn", - "t_test", "t_test_regression", "type", "unidcdf", - "unidinv", "unidpdf", "unidrnd", "unifcdf", - "unifinv", "unifpdf", "unifrnd", "union", "unique", - "unix", "unmkpp", "unpack", "untabify", "untar", - "unwrap", "unzip", "u_test", "validatestring", - "vander", "var", "var_test", "vech", "ver", - "version", "view", "voronoi", "voronoin", - "waitforbuttonpress", "wavread", "wavwrite", - "wblcdf", "wblinv", "wblpdf", "wblrnd", "weekday", - "welch_test", "what", "white", "whitebg", - "wienrnd", "wilcoxon_test", "wilkinson", "winter", - "xlabel", "xlim", "ylabel", "yulewalker", "zip", - "zlabel", "z_test") - - loadable_kw = ( - "airy", "amd", "balance", "besselh", "besseli", - "besselj", "besselk", "bessely", "bitpack", - "bsxfun", "builtin", "ccolamd", "cellfun", - "cellslices", "chol", "choldelete", "cholinsert", - "cholinv", "cholshift", "cholupdate", "colamd", - "colloc", "convhulln", "convn", "csymamd", - "cummax", "cummin", "daspk", "daspk_options", - "dasrt", "dasrt_options", "dassl", "dassl_options", - "dbclear", "dbdown", "dbstack", "dbstatus", - "dbstop", "dbtype", "dbup", "dbwhere", "det", - "dlmread", "dmperm", "dot", "eig", "eigs", - "endgrent", "endpwent", "etree", "fft", "fftn", - "fftw", "filter", "find", "full", "gcd", - "getgrent", "getgrgid", "getgrnam", "getpwent", - "getpwnam", "getpwuid", "getrusage", "givens", - "gmtime", "gnuplot_binary", "hess", "ifft", - "ifftn", "inv", "isdebugmode", "issparse", "kron", - "localtime", "lookup", "lsode", "lsode_options", - "lu", "luinc", "luupdate", "matrix_type", "max", - "min", "mktime", "pinv", "qr", "qrdelete", - "qrinsert", "qrshift", "qrupdate", "quad", - "quad_options", "qz", "rand", "rande", "randg", - "randn", "randp", "randperm", "rcond", "regexp", - "regexpi", "regexprep", "schur", "setgrent", - "setpwent", "sort", "spalloc", "sparse", "spparms", - "sprank", "sqrtm", "strfind", "strftime", - "strptime", "strrep", "svd", "svd_driver", "syl", - "symamd", "symbfact", "symrcm", "time", "tsearch", - "typecast", "urlread", "urlwrite") - - mapping_kw = ( - "abs", "acos", "acosh", "acot", "acoth", "acsc", - "acsch", "angle", "arg", "asec", "asech", "asin", - "asinh", "atan", "atanh", "beta", "betainc", - "betaln", "bincoeff", "cbrt", "ceil", "conj", "cos", - "cosh", "cot", "coth", "csc", "csch", "erf", "erfc", - "erfcx", "erfinv", "exp", "finite", "fix", "floor", - "fmod", "gamma", "gammainc", "gammaln", "imag", - "isalnum", "isalpha", "isascii", "iscntrl", - "isdigit", "isfinite", "isgraph", "isinf", - "islower", "isna", "isnan", "isprint", "ispunct", - "isspace", "isupper", "isxdigit", "lcm", "lgamma", - "log", "lower", "mod", "real", "rem", "round", - "roundb", "sec", "sech", "sign", "sin", "sinh", - "sqrt", "tan", "tanh", "toascii", "tolower", "xor") - - builtin_consts = ( - "EDITOR", "EXEC_PATH", "I", "IMAGE_PATH", "NA", - 
"OCTAVE_HOME", "OCTAVE_VERSION", "PAGER", - "PAGER_FLAGS", "SEEK_CUR", "SEEK_END", "SEEK_SET", - "SIG", "S_ISBLK", "S_ISCHR", "S_ISDIR", "S_ISFIFO", - "S_ISLNK", "S_ISREG", "S_ISSOCK", "WCONTINUE", - "WCOREDUMP", "WEXITSTATUS", "WIFCONTINUED", - "WIFEXITED", "WIFSIGNALED", "WIFSTOPPED", "WNOHANG", - "WSTOPSIG", "WTERMSIG", "WUNTRACED") - - tokens = { - 'root': [ - # We should look into multiline comments - (r'[%#].*$', Comment), - (r'^\s*function\b', Keyword, 'deffunc'), - - # from 'iskeyword' on hg changeset 8cc154f45e37 - (words(( - '__FILE__', '__LINE__', 'break', 'case', 'catch', 'classdef', 'continue', 'do', 'else', - 'elseif', 'end', 'end_try_catch', 'end_unwind_protect', 'endclassdef', - 'endevents', 'endfor', 'endfunction', 'endif', 'endmethods', 'endproperties', - 'endswitch', 'endwhile', 'events', 'for', 'function', 'get', 'global', 'if', 'methods', - 'otherwise', 'persistent', 'properties', 'return', 'set', 'static', 'switch', 'try', - 'until', 'unwind_protect', 'unwind_protect_cleanup', 'while'), suffix=r'\b'), - Keyword), - - (words(builtin_kw + command_kw + function_kw + loadable_kw + mapping_kw, - suffix=r'\b'), Name.Builtin), - - (words(builtin_consts, suffix=r'\b'), Name.Constant), - - # operators in Octave but not Matlab: - (r'-=|!=|!|/=|--', Operator), - # operators: - (r'-|==|~=|<|>|<=|>=|&&|&|~|\|\|?', Operator), - # operators in Octave but not Matlab requiring escape for re: - (r'\*=|\+=|\^=|\/=|\\=|\*\*|\+\+|\.\*\*', Operator), - # operators requiring escape for re: - (r'\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\', Operator), - - - # punctuation: - (r'[\[\](){}:@.,]', Punctuation), - (r'=|:|;', Punctuation), - - (r'"[^"]*"', String), - - (r'(\d+\.\d*|\d*\.\d+)([eEf][+-]?[0-9]+)?', Number.Float), - (r'\d+[eEf][+-]?[0-9]+', Number.Float), - (r'\d+', Number.Integer), - - # quote can be transpose, instead of string: - # (not great, but handles common cases...) - (r'(?<=[\w)\].])\'+', Operator), - (r'(?|<=|>=|&&|&|~|\|\|?', Operator), - # operators requiring escape for re: - (r'\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\', Operator), - - # punctuation: - (r'[\[\](){}@.,=:;]', Punctuation), - - (r'"[^"]*"', String), - - # quote can be transpose, instead of string: - # (not great, but handles common cases...) - (r'(?<=[\w)\].])\'+', Operator), - (r'(?=|<|>|&&|&|~|\|\|?|\.\*|\*|\+|\.\^|\.\\|\./|/|\\' + + tokens = { + 'root': [ + # line starting with '!' is sent as a system command. not sure what + # label to use... + (r'^!.*', String.Other), + (r'%\{\s*\n', Comment.Multiline, 'blockcomment'), + (r'%.*$', Comment), + (r'^\s*function\b', Keyword, 'deffunc'), + + # from 'iskeyword' on version 9.4 (R2018a): + # Check that there is no preceding dot, as keywords are valid field + # names. + (words(('break', 'case', 'catch', 'classdef', 'continue', 'else', + 'elseif', 'end', 'for', 'function', + 'global', 'if', 'otherwise', 'parfor', + 'persistent', 'return', 'spmd', 'switch', + 'try', 'while'), + prefix=r'(?. + + .. 
versionadded:: 0.10 + """ + name = 'Matlab session' + aliases = ['matlabsession'] + + def get_tokens_unprocessed(self, text): + mlexer = MatlabLexer(**self.options) + + curcode = '' + insertions = [] + continuation = False + + for match in line_re.finditer(text): + line = match.group() + + if line.startswith('>> '): + insertions.append((len(curcode), + [(0, Generic.Prompt, line[:3])])) + curcode += line[3:] + + elif line.startswith('>>'): + insertions.append((len(curcode), + [(0, Generic.Prompt, line[:2])])) + curcode += line[2:] + + elif line.startswith('???'): + + idx = len(curcode) + + # without is showing error on same line as before...? + # line = "\n" + line + token = (0, Generic.Traceback, line) + insertions.append((idx, [token])) + elif continuation: + # line_start is the length of the most recent prompt symbol + line_start = len(insertions[-1][-1][-1]) + # Set leading spaces with the length of the prompt to be a generic prompt + # This keeps code aligned when prompts are removed, say with some Javascript + if line.startswith(' '*line_start): + insertions.append((len(curcode), + [(0, Generic.Prompt, line[:line_start])])) + curcode += line[line_start:] + else: + curcode += line + else: + if curcode: + yield from do_insertions( + insertions, mlexer.get_tokens_unprocessed(curcode)) + curcode = '' + insertions = [] + + yield match.start(), Generic.Output, line + + # Does not allow continuation if a comment is included after the ellipses. + # Continues any line that ends with ..., even comments (lines that start with %) + if line.strip().endswith('...'): + continuation = True + else: + continuation = False + + if curcode: # or item: + yield from do_insertions( + insertions, mlexer.get_tokens_unprocessed(curcode)) + + +class OctaveLexer(RegexLexer): + """ + For GNU Octave source code. + + .. versionadded:: 1.5 + """ + name = 'Octave' + aliases = ['octave'] + filenames = ['*.m'] + mimetypes = ['text/octave'] + + # These lists are generated automatically. 
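# Illustrative sketch (not part of this diff): exercising the session
# lexer above. '>>' is treated as a prompt, and per the continuation
# handling, a line ending in '...' carries the statement onto the next
# line while keeping prompt-width leading whitespace as prompt tokens.
from pygments import highlight
from pygments.lexers import MatlabSessionLexer
from pygments.formatters import TerminalFormatter

transcript = (">> x = [1, 2, ...\n"
              "        3, 4];\n"
              ">> disp(x)\n"
              "     1     2     3     4\n")
print(highlight(transcript, MatlabSessionLexer(), TerminalFormatter()))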
+ # Run the following in bash shell: + # + # First dump all of the Octave manual into a plain text file: + # + # $ info octave --subnodes -o octave-manual + # + # Now grep through it: + + # for i in \ + # "Built-in Function" "Command" "Function File" \ + # "Loadable Function" "Mapping Function"; + # do + # perl -e '@name = qw('"$i"'); + # print lc($name[0]),"_kw = [\n"'; + # + # perl -n -e 'print "\"$1\",\n" if /-- '"$i"': .* (\w*) \(/;' \ + # octave-manual | sort | uniq ; + # echo "]" ; + # echo; + # done + + # taken from Octave Mercurial changeset 8cc154f45e37 (30-jan-2011) + + builtin_kw = ( + "addlistener", "addpath", "addproperty", "all", + "and", "any", "argnames", "argv", "assignin", + "atexit", "autoload", + "available_graphics_toolkits", "beep_on_error", + "bitand", "bitmax", "bitor", "bitshift", "bitxor", + "cat", "cell", "cellstr", "char", "class", "clc", + "columns", "command_line_path", + "completion_append_char", "completion_matches", + "complex", "confirm_recursive_rmdir", "cputime", + "crash_dumps_octave_core", "ctranspose", "cumprod", + "cumsum", "debug_on_error", "debug_on_interrupt", + "debug_on_warning", "default_save_options", + "dellistener", "diag", "diff", "disp", + "doc_cache_file", "do_string_escapes", "double", + "drawnow", "e", "echo_executing_commands", "eps", + "eq", "errno", "errno_list", "error", "eval", + "evalin", "exec", "exist", "exit", "eye", "false", + "fclear", "fclose", "fcntl", "fdisp", "feof", + "ferror", "feval", "fflush", "fgetl", "fgets", + "fieldnames", "file_in_loadpath", "file_in_path", + "filemarker", "filesep", "find_dir_in_path", + "fixed_point_format", "fnmatch", "fopen", "fork", + "formula", "fprintf", "fputs", "fread", "freport", + "frewind", "fscanf", "fseek", "fskipl", "ftell", + "functions", "fwrite", "ge", "genpath", "get", + "getegid", "getenv", "geteuid", "getgid", + "getpgrp", "getpid", "getppid", "getuid", "glob", + "gt", "gui_mode", "history_control", + "history_file", "history_size", + "history_timestamp_format_string", "home", + "horzcat", "hypot", "ifelse", + "ignore_function_time_stamp", "inferiorto", + "info_file", "info_program", "inline", "input", + "intmax", "intmin", "ipermute", + "is_absolute_filename", "isargout", "isbool", + "iscell", "iscellstr", "ischar", "iscomplex", + "isempty", "isfield", "isfloat", "isglobal", + "ishandle", "isieee", "isindex", "isinteger", + "islogical", "ismatrix", "ismethod", "isnull", + "isnumeric", "isobject", "isreal", + "is_rooted_relative_filename", "issorted", + "isstruct", "isvarname", "kbhit", "keyboard", + "kill", "lasterr", "lasterror", "lastwarn", + "ldivide", "le", "length", "link", "linspace", + "logical", "lstat", "lt", "make_absolute_filename", + "makeinfo_program", "max_recursion_depth", "merge", + "methods", "mfilename", "minus", "mislocked", + "mkdir", "mkfifo", "mkstemp", "mldivide", "mlock", + "mouse_wheel_zoom", "mpower", "mrdivide", "mtimes", + "munlock", "nargin", "nargout", + "native_float_format", "ndims", "ne", "nfields", + "nnz", "norm", "not", "numel", "nzmax", + "octave_config_info", "octave_core_file_limit", + "octave_core_file_name", + "octave_core_file_options", "ones", "or", + "output_max_field_width", "output_precision", + "page_output_immediately", "page_screen_output", + "path", "pathsep", "pause", "pclose", "permute", + "pi", "pipe", "plus", "popen", "power", + "print_empty_dimensions", "printf", + "print_struct_array_contents", "prod", + "program_invocation_name", "program_name", + "putenv", "puts", "pwd", "quit", "rats", "rdivide", + "readdir", 
"readlink", "read_readline_init_file", + "realmax", "realmin", "rehash", "rename", + "repelems", "re_read_readline_init_file", "reset", + "reshape", "resize", "restoredefaultpath", + "rethrow", "rmdir", "rmfield", "rmpath", "rows", + "save_header_format_string", "save_precision", + "saving_history", "scanf", "set", "setenv", + "shell_cmd", "sighup_dumps_octave_core", + "sigterm_dumps_octave_core", "silent_functions", + "single", "size", "size_equal", "sizemax", + "sizeof", "sleep", "source", "sparse_auto_mutate", + "split_long_rows", "sprintf", "squeeze", "sscanf", + "stat", "stderr", "stdin", "stdout", "strcmp", + "strcmpi", "string_fill_char", "strncmp", + "strncmpi", "struct", "struct_levels_to_print", + "strvcat", "subsasgn", "subsref", "sum", "sumsq", + "superiorto", "suppress_verbose_help_message", + "symlink", "system", "tic", "tilde_expand", + "times", "tmpfile", "tmpnam", "toc", "toupper", + "transpose", "true", "typeinfo", "umask", "uminus", + "uname", "undo_string_escapes", "unlink", "uplus", + "upper", "usage", "usleep", "vec", "vectorize", + "vertcat", "waitpid", "warning", "warranty", + "whos_line_format", "yes_or_no", "zeros", + "inf", "Inf", "nan", "NaN") + + command_kw = ("close", "load", "who", "whos") + + function_kw = ( + "accumarray", "accumdim", "acosd", "acotd", + "acscd", "addtodate", "allchild", "ancestor", + "anova", "arch_fit", "arch_rnd", "arch_test", + "area", "arma_rnd", "arrayfun", "ascii", "asctime", + "asecd", "asind", "assert", "atand", + "autoreg_matrix", "autumn", "axes", "axis", "bar", + "barh", "bartlett", "bartlett_test", "beep", + "betacdf", "betainv", "betapdf", "betarnd", + "bicgstab", "bicubic", "binary", "binocdf", + "binoinv", "binopdf", "binornd", "bitcmp", + "bitget", "bitset", "blackman", "blanks", + "blkdiag", "bone", "box", "brighten", "calendar", + "cast", "cauchy_cdf", "cauchy_inv", "cauchy_pdf", + "cauchy_rnd", "caxis", "celldisp", "center", "cgs", + "chisquare_test_homogeneity", + "chisquare_test_independence", "circshift", "cla", + "clabel", "clf", "clock", "cloglog", "closereq", + "colon", "colorbar", "colormap", "colperm", + "comet", "common_size", "commutation_matrix", + "compan", "compare_versions", "compass", + "computer", "cond", "condest", "contour", + "contourc", "contourf", "contrast", "conv", + "convhull", "cool", "copper", "copyfile", "cor", + "corrcoef", "cor_test", "cosd", "cotd", "cov", + "cplxpair", "cross", "cscd", "cstrcat", "csvread", + "csvwrite", "ctime", "cumtrapz", "curl", "cut", + "cylinder", "date", "datenum", "datestr", + "datetick", "datevec", "dblquad", "deal", + "deblank", "deconv", "delaunay", "delaunayn", + "delete", "demo", "detrend", "diffpara", "diffuse", + "dir", "discrete_cdf", "discrete_inv", + "discrete_pdf", "discrete_rnd", "display", + "divergence", "dlmwrite", "dos", "dsearch", + "dsearchn", "duplication_matrix", "durbinlevinson", + "ellipsoid", "empirical_cdf", "empirical_inv", + "empirical_pdf", "empirical_rnd", "eomday", + "errorbar", "etime", "etreeplot", "example", + "expcdf", "expinv", "expm", "exppdf", "exprnd", + "ezcontour", "ezcontourf", "ezmesh", "ezmeshc", + "ezplot", "ezpolar", "ezsurf", "ezsurfc", "factor", + "factorial", "fail", "fcdf", "feather", "fftconv", + "fftfilt", "fftshift", "figure", "fileattrib", + "fileparts", "fill", "findall", "findobj", + "findstr", "finv", "flag", "flipdim", "fliplr", + "flipud", "fpdf", "fplot", "fractdiff", "freqz", + "freqz_plot", "frnd", "fsolve", + "f_test_regression", "ftp", "fullfile", "fzero", + "gamcdf", "gaminv", "gampdf", "gamrnd", "gca", 
+ "gcbf", "gcbo", "gcf", "genvarname", "geocdf", + "geoinv", "geopdf", "geornd", "getfield", "ginput", + "glpk", "gls", "gplot", "gradient", + "graphics_toolkit", "gray", "grid", "griddata", + "griddatan", "gtext", "gunzip", "gzip", "hadamard", + "hamming", "hankel", "hanning", "hggroup", + "hidden", "hilb", "hist", "histc", "hold", "hot", + "hotelling_test", "housh", "hsv", "hurst", + "hygecdf", "hygeinv", "hygepdf", "hygernd", + "idivide", "ifftshift", "image", "imagesc", + "imfinfo", "imread", "imshow", "imwrite", "index", + "info", "inpolygon", "inputname", "interpft", + "interpn", "intersect", "invhilb", "iqr", "isa", + "isdefinite", "isdir", "is_duplicate_entry", + "isequal", "isequalwithequalnans", "isfigure", + "ishermitian", "ishghandle", "is_leap_year", + "isletter", "ismac", "ismember", "ispc", "isprime", + "isprop", "isscalar", "issquare", "isstrprop", + "issymmetric", "isunix", "is_valid_file_id", + "isvector", "jet", "kendall", + "kolmogorov_smirnov_cdf", + "kolmogorov_smirnov_test", "kruskal_wallis_test", + "krylov", "kurtosis", "laplace_cdf", "laplace_inv", + "laplace_pdf", "laplace_rnd", "legend", "legendre", + "license", "line", "linkprop", "list_primes", + "loadaudio", "loadobj", "logistic_cdf", + "logistic_inv", "logistic_pdf", "logistic_rnd", + "logit", "loglog", "loglogerr", "logm", "logncdf", + "logninv", "lognpdf", "lognrnd", "logspace", + "lookfor", "ls_command", "lsqnonneg", "magic", + "mahalanobis", "manova", "matlabroot", + "mcnemar_test", "mean", "meansq", "median", "menu", + "mesh", "meshc", "meshgrid", "meshz", "mexext", + "mget", "mkpp", "mode", "moment", "movefile", + "mpoles", "mput", "namelengthmax", "nargchk", + "nargoutchk", "nbincdf", "nbininv", "nbinpdf", + "nbinrnd", "nchoosek", "ndgrid", "newplot", "news", + "nonzeros", "normcdf", "normest", "norminv", + "normpdf", "normrnd", "now", "nthroot", "null", + "ocean", "ols", "onenormest", "optimget", + "optimset", "orderfields", "orient", "orth", + "pack", "pareto", "parseparams", "pascal", "patch", + "pathdef", "pcg", "pchip", "pcolor", "pcr", + "peaks", "periodogram", "perl", "perms", "pie", + "pink", "planerot", "playaudio", "plot", + "plotmatrix", "plotyy", "poisscdf", "poissinv", + "poisspdf", "poissrnd", "polar", "poly", + "polyaffine", "polyarea", "polyderiv", "polyfit", + "polygcd", "polyint", "polyout", "polyreduce", + "polyval", "polyvalm", "postpad", "powerset", + "ppder", "ppint", "ppjumps", "ppplot", "ppval", + "pqpnonneg", "prepad", "primes", "print", + "print_usage", "prism", "probit", "qp", "qqplot", + "quadcc", "quadgk", "quadl", "quadv", "quiver", + "qzhess", "rainbow", "randi", "range", "rank", + "ranks", "rat", "reallog", "realpow", "realsqrt", + "record", "rectangle_lw", "rectangle_sw", + "rectint", "refresh", "refreshdata", + "regexptranslate", "repmat", "residue", "ribbon", + "rindex", "roots", "rose", "rosser", "rotdim", + "rref", "run", "run_count", "rundemos", "run_test", + "runtests", "saveas", "saveaudio", "saveobj", + "savepath", "scatter", "secd", "semilogx", + "semilogxerr", "semilogy", "semilogyerr", + "setaudio", "setdiff", "setfield", "setxor", + "shading", "shift", "shiftdim", "sign_test", + "sinc", "sind", "sinetone", "sinewave", "skewness", + "slice", "sombrero", "sortrows", "spaugment", + "spconvert", "spdiags", "spearman", "spectral_adf", + "spectral_xdf", "specular", "speed", "spencer", + "speye", "spfun", "sphere", "spinmap", "spline", + "spones", "sprand", "sprandn", "sprandsym", + "spring", "spstats", "spy", "sqp", "stairs", + "statistics", "std", 
"stdnormal_cdf", + "stdnormal_inv", "stdnormal_pdf", "stdnormal_rnd", + "stem", "stft", "strcat", "strchr", "strjust", + "strmatch", "strread", "strsplit", "strtok", + "strtrim", "strtrunc", "structfun", "studentize", + "subplot", "subsindex", "subspace", "substr", + "substruct", "summer", "surf", "surface", "surfc", + "surfl", "surfnorm", "svds", "swapbytes", + "sylvester_matrix", "symvar", "synthesis", "table", + "tand", "tar", "tcdf", "tempdir", "tempname", + "test", "text", "textread", "textscan", "tinv", + "title", "toeplitz", "tpdf", "trace", "trapz", + "treelayout", "treeplot", "triangle_lw", + "triangle_sw", "tril", "trimesh", "triplequad", + "triplot", "trisurf", "triu", "trnd", "tsearchn", + "t_test", "t_test_regression", "type", "unidcdf", + "unidinv", "unidpdf", "unidrnd", "unifcdf", + "unifinv", "unifpdf", "unifrnd", "union", "unique", + "unix", "unmkpp", "unpack", "untabify", "untar", + "unwrap", "unzip", "u_test", "validatestring", + "vander", "var", "var_test", "vech", "ver", + "version", "view", "voronoi", "voronoin", + "waitforbuttonpress", "wavread", "wavwrite", + "wblcdf", "wblinv", "wblpdf", "wblrnd", "weekday", + "welch_test", "what", "white", "whitebg", + "wienrnd", "wilcoxon_test", "wilkinson", "winter", + "xlabel", "xlim", "ylabel", "yulewalker", "zip", + "zlabel", "z_test") + + loadable_kw = ( + "airy", "amd", "balance", "besselh", "besseli", + "besselj", "besselk", "bessely", "bitpack", + "bsxfun", "builtin", "ccolamd", "cellfun", + "cellslices", "chol", "choldelete", "cholinsert", + "cholinv", "cholshift", "cholupdate", "colamd", + "colloc", "convhulln", "convn", "csymamd", + "cummax", "cummin", "daspk", "daspk_options", + "dasrt", "dasrt_options", "dassl", "dassl_options", + "dbclear", "dbdown", "dbstack", "dbstatus", + "dbstop", "dbtype", "dbup", "dbwhere", "det", + "dlmread", "dmperm", "dot", "eig", "eigs", + "endgrent", "endpwent", "etree", "fft", "fftn", + "fftw", "filter", "find", "full", "gcd", + "getgrent", "getgrgid", "getgrnam", "getpwent", + "getpwnam", "getpwuid", "getrusage", "givens", + "gmtime", "gnuplot_binary", "hess", "ifft", + "ifftn", "inv", "isdebugmode", "issparse", "kron", + "localtime", "lookup", "lsode", "lsode_options", + "lu", "luinc", "luupdate", "matrix_type", "max", + "min", "mktime", "pinv", "qr", "qrdelete", + "qrinsert", "qrshift", "qrupdate", "quad", + "quad_options", "qz", "rand", "rande", "randg", + "randn", "randp", "randperm", "rcond", "regexp", + "regexpi", "regexprep", "schur", "setgrent", + "setpwent", "sort", "spalloc", "sparse", "spparms", + "sprank", "sqrtm", "strfind", "strftime", + "strptime", "strrep", "svd", "svd_driver", "syl", + "symamd", "symbfact", "symrcm", "time", "tsearch", + "typecast", "urlread", "urlwrite") + + mapping_kw = ( + "abs", "acos", "acosh", "acot", "acoth", "acsc", + "acsch", "angle", "arg", "asec", "asech", "asin", + "asinh", "atan", "atanh", "beta", "betainc", + "betaln", "bincoeff", "cbrt", "ceil", "conj", "cos", + "cosh", "cot", "coth", "csc", "csch", "erf", "erfc", + "erfcx", "erfinv", "exp", "finite", "fix", "floor", + "fmod", "gamma", "gammainc", "gammaln", "imag", + "isalnum", "isalpha", "isascii", "iscntrl", + "isdigit", "isfinite", "isgraph", "isinf", + "islower", "isna", "isnan", "isprint", "ispunct", + "isspace", "isupper", "isxdigit", "lcm", "lgamma", + "log", "lower", "mod", "real", "rem", "round", + "roundb", "sec", "sech", "sign", "sin", "sinh", + "sqrt", "tan", "tanh", "toascii", "tolower", "xor") + + builtin_consts = ( + "EDITOR", "EXEC_PATH", "I", "IMAGE_PATH", "NA", + 
"OCTAVE_HOME", "OCTAVE_VERSION", "PAGER", + "PAGER_FLAGS", "SEEK_CUR", "SEEK_END", "SEEK_SET", + "SIG", "S_ISBLK", "S_ISCHR", "S_ISDIR", "S_ISFIFO", + "S_ISLNK", "S_ISREG", "S_ISSOCK", "WCONTINUE", + "WCOREDUMP", "WEXITSTATUS", "WIFCONTINUED", + "WIFEXITED", "WIFSIGNALED", "WIFSTOPPED", "WNOHANG", + "WSTOPSIG", "WTERMSIG", "WUNTRACED") + + tokens = { + 'root': [ + # We should look into multiline comments + (r'[%#].*$', Comment), + (r'^\s*function\b', Keyword, 'deffunc'), + + # from 'iskeyword' on hg changeset 8cc154f45e37 + (words(( + '__FILE__', '__LINE__', 'break', 'case', 'catch', 'classdef', 'continue', 'do', 'else', + 'elseif', 'end', 'end_try_catch', 'end_unwind_protect', 'endclassdef', + 'endevents', 'endfor', 'endfunction', 'endif', 'endmethods', 'endproperties', + 'endswitch', 'endwhile', 'events', 'for', 'function', 'get', 'global', 'if', 'methods', + 'otherwise', 'persistent', 'properties', 'return', 'set', 'static', 'switch', 'try', + 'until', 'unwind_protect', 'unwind_protect_cleanup', 'while'), suffix=r'\b'), + Keyword), + + (words(builtin_kw + command_kw + function_kw + loadable_kw + mapping_kw, + suffix=r'\b'), Name.Builtin), + + (words(builtin_consts, suffix=r'\b'), Name.Constant), + + # operators in Octave but not Matlab: + (r'-=|!=|!|/=|--', Operator), + # operators: + (r'-|==|~=|<|>|<=|>=|&&|&|~|\|\|?', Operator), + # operators in Octave but not Matlab requiring escape for re: + (r'\*=|\+=|\^=|\/=|\\=|\*\*|\+\+|\.\*\*', Operator), + # operators requiring escape for re: + (r'\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\', Operator), + + + # punctuation: + (r'[\[\](){}:@.,]', Punctuation), + (r'=|:|;', Punctuation), + + (r'"[^"]*"', String), + + (r'(\d+\.\d*|\d*\.\d+)([eEf][+-]?[0-9]+)?', Number.Float), + (r'\d+[eEf][+-]?[0-9]+', Number.Float), + (r'\d+', Number.Integer), + + # quote can be transpose, instead of string: + # (not great, but handles common cases...) + (r'(?<=[\w)\].])\'+', Operator), + (r'(?|<=|>=|&&|&|~|\|\|?', Operator), + # operators requiring escape for re: + (r'\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\', Operator), + + # punctuation: + (r'[\[\](){}@.,=:;]', Punctuation), + + (r'"[^"]*"', String), + + # quote can be transpose, instead of string: + # (not great, but handles common cases...) + (r'(?<=[\w)\].])\'+', Operator), + (r'(?', '->', '#', - # Modules - ':>', - } - - nonid_reserved = {'(', ')', '[', ']', '{', '}', ',', ';', '...', '_'} - - alphanumid_re = r"[a-zA-Z][\w']*" - symbolicid_re = r"[!%&$#+\-/:<=>?@\\~`^|*]+" - - # A character constant is a sequence of the form #s, where s is a string - # constant denoting a string of size one character. This setup just parses - # the entire string as either a String.Double or a String.Char (depending - # on the argument), even if the String.Char is an erronous - # multiple-character string. 
- def stringy(whatkind): - return [ - (r'[^"\\]', whatkind), - (r'\\[\\"abtnvfr]', String.Escape), - # Control-character notation is used for codes < 32, - # where \^@ == \000 - (r'\\\^[\x40-\x5e]', String.Escape), - # Docs say 'decimal digits' - (r'\\[0-9]{3}', String.Escape), - (r'\\u[0-9a-fA-F]{4}', String.Escape), - (r'\\\s+\\', String.Interpol), - (r'"', whatkind, '#pop'), - ] - - # Callbacks for distinguishing tokens and reserved words - def long_id_callback(self, match): - if match.group(1) in self.alphanumid_reserved: - token = Error - else: - token = Name.Namespace - yield match.start(1), token, match.group(1) - yield match.start(2), Punctuation, match.group(2) - - def end_id_callback(self, match): - if match.group(1) in self.alphanumid_reserved: - token = Error - elif match.group(1) in self.symbolicid_reserved: - token = Error - else: - token = Name - yield match.start(1), token, match.group(1) - - def id_callback(self, match): - str = match.group(1) - if str in self.alphanumid_reserved: - token = Keyword.Reserved - elif str in self.symbolicid_reserved: - token = Punctuation - else: - token = Name - yield match.start(1), token, str - - tokens = { - # Whitespace and comments are (almost) everywhere - 'whitespace': [ - (r'\s+', Text), - (r'\(\*', Comment.Multiline, 'comment'), - ], - - 'delimiters': [ - # This lexer treats these delimiters specially: - # Delimiters define scopes, and the scope is how the meaning of - # the `|' is resolved - is it a case/handle expression, or function - # definition by cases? (This is not how the Definition works, but - # it's how MLton behaves, see http://mlton.org/SMLNJDeviations) - (r'\(|\[|\{', Punctuation, 'main'), - (r'\)|\]|\}', Punctuation, '#pop'), - (r'\b(let|if|local)\b(?!\')', Keyword.Reserved, ('main', 'main')), - (r'\b(struct|sig|while)\b(?!\')', Keyword.Reserved, 'main'), - (r'\b(do|else|end|in|then)\b(?!\')', Keyword.Reserved, '#pop'), - ], - - 'core': [ - # Punctuation that doesn't overlap symbolic identifiers - (r'(%s)' % '|'.join(re.escape(z) for z in nonid_reserved), - Punctuation), - - # Special constants: strings, floats, numbers in decimal and hex - (r'#"', String.Char, 'char'), - (r'"', String.Double, 'string'), - (r'~?0x[0-9a-fA-F]+', Number.Hex), - (r'0wx[0-9a-fA-F]+', Number.Hex), - (r'0w\d+', Number.Integer), - (r'~?\d+\.\d+[eE]~?\d+', Number.Float), - (r'~?\d+\.\d+', Number.Float), - (r'~?\d+[eE]~?\d+', Number.Float), - (r'~?\d+', Number.Integer), - - # Labels - (r'#\s*[1-9][0-9]*', Name.Label), - (r'#\s*(%s)' % alphanumid_re, Name.Label), - (r'#\s+(%s)' % symbolicid_re, Name.Label), - # Some reserved words trigger a special, local lexer state change - (r'\b(datatype|abstype)\b(?!\')', Keyword.Reserved, 'dname'), - (r'(?=\b(exception)\b(?!\'))', Text, ('ename')), - (r'\b(functor|include|open|signature|structure)\b(?!\')', - Keyword.Reserved, 'sname'), - (r'\b(type|eqtype)\b(?!\')', Keyword.Reserved, 'tname'), - - # Regular identifiers, long and otherwise - (r'\'[\w\']*', Name.Decorator), - (r'(%s)(\.)' % alphanumid_re, long_id_callback, "dotted"), - (r'(%s)' % alphanumid_re, id_callback), - (r'(%s)' % symbolicid_re, id_callback), - ], - 'dotted': [ - (r'(%s)(\.)' % alphanumid_re, long_id_callback), - (r'(%s)' % alphanumid_re, end_id_callback, "#pop"), - (r'(%s)' % symbolicid_re, end_id_callback, "#pop"), - (r'\s+', Error), - (r'\S+', Error), - ], - - - # Main parser (prevents errors in files that have scoping errors) - 'root': [ - default('main') - ], - - # In this scope, I expect '|' to not be followed by a function 
name, - # and I expect 'and' to be followed by a binding site - 'main': [ - include('whitespace'), - - # Special behavior of val/and/fun - (r'\b(val|and)\b(?!\')', Keyword.Reserved, 'vname'), - (r'\b(fun)\b(?!\')', Keyword.Reserved, - ('#pop', 'main-fun', 'fname')), - - include('delimiters'), - include('core'), - (r'\S+', Error), - ], - - # In this scope, I expect '|' and 'and' to be followed by a function - 'main-fun': [ - include('whitespace'), - - (r'\s', Text), - (r'\(\*', Comment.Multiline, 'comment'), - - # Special behavior of val/and/fun - (r'\b(fun|and)\b(?!\')', Keyword.Reserved, 'fname'), - (r'\b(val)\b(?!\')', Keyword.Reserved, - ('#pop', 'main', 'vname')), - - # Special behavior of '|' and '|'-manipulating keywords - (r'\|', Punctuation, 'fname'), - (r'\b(case|handle)\b(?!\')', Keyword.Reserved, - ('#pop', 'main')), - - include('delimiters'), - include('core'), - (r'\S+', Error), - ], - - # Character and string parsers - 'char': stringy(String.Char), - 'string': stringy(String.Double), - - 'breakout': [ - (r'(?=\b(%s)\b(?!\'))' % '|'.join(alphanumid_reserved), Text, '#pop'), - ], - - # Dealing with what comes after module system keywords - 'sname': [ - include('whitespace'), - include('breakout'), - - (r'(%s)' % alphanumid_re, Name.Namespace), - default('#pop'), - ], - - # Dealing with what comes after the 'fun' (or 'and' or '|') keyword - 'fname': [ - include('whitespace'), - (r'\'[\w\']*', Name.Decorator), - (r'\(', Punctuation, 'tyvarseq'), - - (r'(%s)' % alphanumid_re, Name.Function, '#pop'), - (r'(%s)' % symbolicid_re, Name.Function, '#pop'), - - # Ignore interesting function declarations like "fun (x + y) = ..." - default('#pop'), - ], - - # Dealing with what comes after the 'val' (or 'and') keyword - 'vname': [ - include('whitespace'), - (r'\'[\w\']*', Name.Decorator), - (r'\(', Punctuation, 'tyvarseq'), - - (r'(%s)(\s*)(=(?!%s))' % (alphanumid_re, symbolicid_re), - bygroups(Name.Variable, Text, Punctuation), '#pop'), - (r'(%s)(\s*)(=(?!%s))' % (symbolicid_re, symbolicid_re), - bygroups(Name.Variable, Text, Punctuation), '#pop'), - (r'(%s)' % alphanumid_re, Name.Variable, '#pop'), - (r'(%s)' % symbolicid_re, Name.Variable, '#pop'), - - # Ignore interesting patterns like 'val (x, y)' - default('#pop'), - ], - - # Dealing with what comes after the 'type' (or 'and') keyword - 'tname': [ - include('whitespace'), - include('breakout'), - - (r'\'[\w\']*', Name.Decorator), - (r'\(', Punctuation, 'tyvarseq'), - (r'=(?!%s)' % symbolicid_re, Punctuation, ('#pop', 'typbind')), - - (r'(%s)' % alphanumid_re, Keyword.Type), - (r'(%s)' % symbolicid_re, Keyword.Type), - (r'\S+', Error, '#pop'), - ], - - # A type binding includes most identifiers - 'typbind': [ - include('whitespace'), - - (r'\b(and)\b(?!\')', Keyword.Reserved, ('#pop', 'tname')), - - include('breakout'), - include('core'), - (r'\S+', Error, '#pop'), - ], - - # Dealing with what comes after the 'datatype' (or 'and') keyword - 'dname': [ - include('whitespace'), - include('breakout'), - - (r'\'[\w\']*', Name.Decorator), - (r'\(', Punctuation, 'tyvarseq'), - (r'(=)(\s*)(datatype)', - bygroups(Punctuation, Text, Keyword.Reserved), '#pop'), - (r'=(?!%s)' % symbolicid_re, Punctuation, - ('#pop', 'datbind', 'datcon')), - - (r'(%s)' % alphanumid_re, Keyword.Type), - (r'(%s)' % symbolicid_re, Keyword.Type), - (r'\S+', Error, '#pop'), - ], - - # common case - A | B | C of int - 'datbind': [ - include('whitespace'), - - (r'\b(and)\b(?!\')', Keyword.Reserved, ('#pop', 'dname')), - (r'\b(withtype)\b(?!\')', Keyword.Reserved, 
('#pop', 'tname')), - (r'\b(of)\b(?!\')', Keyword.Reserved), - - (r'(\|)(\s*)(%s)' % alphanumid_re, - bygroups(Punctuation, Text, Name.Class)), - (r'(\|)(\s+)(%s)' % symbolicid_re, - bygroups(Punctuation, Text, Name.Class)), - - include('breakout'), - include('core'), - (r'\S+', Error), - ], - - # Dealing with what comes after an exception - 'ename': [ - include('whitespace'), - - (r'(exception|and)\b(\s+)(%s)' % alphanumid_re, - bygroups(Keyword.Reserved, Text, Name.Class)), - (r'(exception|and)\b(\s*)(%s)' % symbolicid_re, - bygroups(Keyword.Reserved, Text, Name.Class)), - (r'\b(of)\b(?!\')', Keyword.Reserved), - - include('breakout'), - include('core'), - (r'\S+', Error), - ], - - 'datcon': [ - include('whitespace'), - (r'(%s)' % alphanumid_re, Name.Class, '#pop'), - (r'(%s)' % symbolicid_re, Name.Class, '#pop'), - (r'\S+', Error, '#pop'), - ], - - # Series of type variables - 'tyvarseq': [ - (r'\s', Text), - (r'\(\*', Comment.Multiline, 'comment'), - - (r'\'[\w\']*', Name.Decorator), - (alphanumid_re, Name), - (r',', Punctuation), - (r'\)', Punctuation, '#pop'), - (symbolicid_re, Name), - ], - - 'comment': [ - (r'[^(*)]', Comment.Multiline), - (r'\(\*', Comment.Multiline, '#push'), - (r'\*\)', Comment.Multiline, '#pop'), - (r'[(*)]', Comment.Multiline), - ], - } - - -class OcamlLexer(RegexLexer): - """ - For the OCaml language. - - .. versionadded:: 0.7 - """ - - name = 'OCaml' - aliases = ['ocaml'] - filenames = ['*.ml', '*.mli', '*.mll', '*.mly'] - mimetypes = ['text/x-ocaml'] - - keywords = ( - 'as', 'assert', 'begin', 'class', 'constraint', 'do', 'done', - 'downto', 'else', 'end', 'exception', 'external', 'false', - 'for', 'fun', 'function', 'functor', 'if', 'in', 'include', - 'inherit', 'initializer', 'lazy', 'let', 'match', 'method', - 'module', 'mutable', 'new', 'object', 'of', 'open', 'private', - 'raise', 'rec', 'sig', 'struct', 'then', 'to', 'true', 'try', - 'type', 'value', 'val', 'virtual', 'when', 'while', 'with', - ) - keyopts = ( - '!=', '#', '&', '&&', r'\(', r'\)', r'\*', r'\+', ',', '-', - r'-\.', '->', r'\.', r'\.\.', ':', '::', ':=', ':>', ';', ';;', '<', - '<-', '=', '>', '>]', r'>\}', r'\?', r'\?\?', r'\[', r'\[<', r'\[>', - r'\[\|', ']', '_', '`', r'\{', r'\{<', r'\|', r'\|]', r'\}', '~' - ) - - operators = r'[!$%&*+\./:<=>?@^|~-]' - word_operators = ('and', 'asr', 'land', 'lor', 'lsl', 'lxor', 'mod', 'or') - prefix_syms = r'[!?~]' - infix_syms = r'[=<>@^|&+\*/$%-]' - primitives = ('unit', 'int', 'float', 'bool', 'string', 'char', 'list', 'array') - - tokens = { - 'escape-sequence': [ - (r'\\[\\"\'ntbr]', String.Escape), - (r'\\[0-9]{3}', String.Escape), - (r'\\x[0-9a-fA-F]{2}', String.Escape), - ], - 'root': [ - (r'\s+', Text), - (r'false|true|\(\)|\[\]', Name.Builtin.Pseudo), - (r'\b([A-Z][\w\']*)(?=\s*\.)', Name.Namespace, 'dotted'), - (r'\b([A-Z][\w\']*)', Name.Class), - (r'\(\*(?![)])', Comment, 'comment'), - (r'\b(%s)\b' % '|'.join(keywords), Keyword), - (r'(%s)' % '|'.join(keyopts[::-1]), Operator), - (r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator), - (r'\b(%s)\b' % '|'.join(word_operators), Operator.Word), - (r'\b(%s)\b' % '|'.join(primitives), Keyword.Type), - - (r"[^\W\d][\w']*", Name), - - (r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)', Number.Float), - (r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex), - (r'0[oO][0-7][0-7_]*', Number.Oct), - (r'0[bB][01][01_]*', Number.Bin), - (r'\d[\d_]*', Number.Integer), - - (r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'", - String.Char), - (r"'.'", String.Char), - (r"'", Keyword), # a stray 
quote is another syntax element
-
- (r'"', String.Double, 'string'),
-
- (r'[~?][a-z][\w\']*:', Name.Variable),
- ],
- 'comment': [
- (r'[^(*)]+', Comment),
- (r'\(\*', Comment, '#push'),
- (r'\*\)', Comment, '#pop'),
- (r'[(*)]', Comment),
- ],
- 'string': [
- (r'[^\\"]+', String.Double),
- include('escape-sequence'),
- (r'\\\n', String.Double),
- (r'"', String.Double, '#pop'),
- ],
- 'dotted': [
- (r'\s+', Text),
- (r'\.', Punctuation),
- (r'[A-Z][\w\']*(?=\s*\.)', Name.Namespace),
- (r'[A-Z][\w\']*', Name.Class, '#pop'),
- (r'[a-z_][\w\']*', Name, '#pop'),
- default('#pop'),
- ],
- }
-
-class OpaLexer(RegexLexer):
- """
- Lexer for the Opa language (http://opalang.org).
-
- .. versionadded:: 1.5
- """
-
- name = 'Opa'
- aliases = ['opa']
- filenames = ['*.opa']
- mimetypes = ['text/x-opa']
-
- # most of these aren't strictly keywords
- # but if you color only real keywords, you might just
- # as well not color anything
- keywords = (
- 'and', 'as', 'begin', 'case', 'client', 'css', 'database', 'db', 'do',
- 'else', 'end', 'external', 'forall', 'function', 'if', 'import',
- 'match', 'module', 'or', 'package', 'parser', 'rec', 'server', 'then',
- 'type', 'val', 'with', 'xml_parser',
- )
-
- # matches both stuff and `stuff`
- ident_re = r'(([a-zA-Z_]\w*)|(`[^`]*`))'
-
- op_re = r'[.=\-<>,@~%/+?*&^!]'
- punc_re = r'[()\[\],;|]' # '{' and '}' are treated elsewhere
- # because they are also used for inserts
-
- tokens = {
- # copied from the caml lexer, should be adapted
- 'escape-sequence': [
- (r'\\[\\"\'ntr}]', String.Escape),
- (r'\\[0-9]{3}', String.Escape),
- (r'\\x[0-9a-fA-F]{2}', String.Escape),
- ],
-
- # factorizing these rules, because they are inserted many times
- 'comments': [
- (r'/\*', Comment, 'nested-comment'),
- (r'//.*?$', Comment),
- ],
- 'comments-and-spaces': [
- include('comments'),
- (r'\s+', Text),
- ],
-
- 'root': [
- include('comments-and-spaces'),
- # keywords
- (words(keywords, prefix=r'\b', suffix=r'\b'), Keyword),
- # directives
- # we could parse the actual set of directives instead of anything
- # starting with @, but this is troublesome
- # because it needs to be adjusted all the time
- # and assuming we parse only sources that compile, it is useless
- (r'@' + ident_re + r'\b', Name.Builtin.Pseudo),
-
- # number literals
- (r'-?.[\d]+([eE][+\-]?\d+)', Number.Float),
- (r'-?\d+.\d*([eE][+\-]?\d+)', Number.Float),
- (r'-?\d+[eE][+\-]?\d+', Number.Float),
- (r'0[xX][\da-fA-F]+', Number.Hex),
- (r'0[oO][0-7]+', Number.Oct),
- (r'0[bB][01]+', Number.Bin),
- (r'\d+', Number.Integer),
- # color literals
- (r'#[\da-fA-F]{3,6}', Number.Integer),
-
- # string literals
- (r'"', String.Double, 'string'),
- # char literal, should be checked because this is the regexp from
- # the caml lexer
- (r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2})|.)'",
- String.Char),
-
- # this is meant to deal with embedded exprs in strings
- # every time we find a '}' we pop a state so that if we were
- # inside a string, we are back in the string state
- # as a consequence, we must also push a state every time we find a
- # '{' or else we will have errors when parsing {} for instance
- (r'\{', Operator, '#push'),
- (r'\}', Operator, '#pop'),
-
- # html literals
- # this is a much more strict that the actual parser,
- # since a<b would not be parsed as html
- # but then again, the parser is way too lax, and we can't hope
- # to have something correct, so...
- (r'<(?=[a-zA-Z>])', String.Single, 'html-open-tag'),
-
- # db path
- # matching the '[_]' in '/a[_]' because it is a part
- # of the syntax of the db path definition
- # unfortunately, i don't know how to match the ']' in
- # /a[1], so this is somewhat inconsistent
-
(r'[@?!]?(/\w+)+(\[_\])?', Name.Variable), - # putting the same color on <- as on db path, since - # it can be used only to mean Db.write - (r'<-(?!'+op_re+r')', Name.Variable), - - # 'modules' - # although modules are not distinguished by their names as in caml - # the standard library seems to follow the convention that modules - # only area capitalized - (r'\b([A-Z]\w*)(?=\.)', Name.Namespace), - - # operators - # = has a special role because this is the only - # way to syntactic distinguish binding constructions - # unfortunately, this colors the equal in {x=2} too - (r'=(?!'+op_re+r')', Keyword), - (r'(%s)+' % op_re, Operator), - (r'(%s)+' % punc_re, Operator), - - # coercions - (r':', Operator, 'type'), - # type variables - # we need this rule because we don't parse specially type - # definitions so in "type t('a) = ...", "'a" is parsed by 'root' - ("'"+ident_re, Keyword.Type), - - # id literal, #something, or #{expr} - (r'#'+ident_re, String.Single), - (r'#(?=\{)', String.Single), - - # identifiers - # this avoids to color '2' in 'a2' as an integer - (ident_re, Text), - - # default, not sure if that is needed or not - # (r'.', Text), - ], - - # it is quite painful to have to parse types to know where they end - # this is the general rule for a type - # a type is either: - # * -> ty - # * type-with-slash - # * type-with-slash -> ty - # * type-with-slash (, type-with-slash)+ -> ty - # - # the code is pretty funky in here, but this code would roughly - # translate in caml to: - # let rec type stream = - # match stream with - # | [< "->"; stream >] -> type stream - # | [< ""; stream >] -> - # type_with_slash stream - # type_lhs_1 stream; - # and type_1 stream = ... - 'type': [ - include('comments-and-spaces'), - (r'->', Keyword.Type), - default(('#pop', 'type-lhs-1', 'type-with-slash')), - ], - - # parses all the atomic or closed constructions in the syntax of type - # expressions: record types, tuple types, type constructors, basic type - # and type variables - 'type-1': [ - include('comments-and-spaces'), - (r'\(', Keyword.Type, ('#pop', 'type-tuple')), - (r'~?\{', Keyword.Type, ('#pop', 'type-record')), - (ident_re+r'\(', Keyword.Type, ('#pop', 'type-tuple')), - (ident_re, Keyword.Type, '#pop'), - ("'"+ident_re, Keyword.Type), - # this case is not in the syntax but sometimes - # we think we are parsing types when in fact we are parsing - # some css, so we just pop the states until we get back into - # the root state - default('#pop'), - ], - - # type-with-slash is either: - # * type-1 - # * type-1 (/ type-1)+ - 'type-with-slash': [ - include('comments-and-spaces'), - default(('#pop', 'slash-type-1', 'type-1')), - ], - 'slash-type-1': [ - include('comments-and-spaces'), - ('/', Keyword.Type, ('#pop', 'type-1')), - # same remark as above - default('#pop'), - ], - - # we go in this state after having parsed a type-with-slash - # while trying to parse a type - # and at this point we must determine if we are parsing an arrow - # type (in which case we must continue parsing) or not (in which - # case we stop) - 'type-lhs-1': [ - include('comments-and-spaces'), - (r'->', Keyword.Type, ('#pop', 'type')), - (r'(?=,)', Keyword.Type, ('#pop', 'type-arrow')), - default('#pop'), - ], - 'type-arrow': [ - include('comments-and-spaces'), - # the look ahead here allows to parse f(x : int, y : float -> truc) - # correctly - (r',(?=[^:]*?->)', Keyword.Type, 'type-with-slash'), - (r'->', Keyword.Type, ('#pop', 'type')), - # same remark as above - default('#pop'), - ], - - # no need to do precise parsing 
for tuples and records
- # because they are closed constructions, so we can simply
- # find the closing delimiter
- # note that this function would be not work if the source
- # contained identifiers like `{)` (although it could be patched
- # to support it)
- 'type-tuple': [
- include('comments-and-spaces'),
- (r'[^()/*]+', Keyword.Type),
- (r'[/*]', Keyword.Type),
- (r'\(', Keyword.Type, '#push'),
- (r'\)', Keyword.Type, '#pop'),
- ],
- 'type-record': [
- include('comments-and-spaces'),
- (r'[^{}/*]+', Keyword.Type),
- (r'[/*]', Keyword.Type),
- (r'\{', Keyword.Type, '#push'),
- (r'\}', Keyword.Type, '#pop'),
- ],
-
- # 'type-tuple': [
- # include('comments-and-spaces'),
- # (r'\)', Keyword.Type, '#pop'),
- # default(('#pop', 'type-tuple-1', 'type-1')),
- # ],
- # 'type-tuple-1': [
- # include('comments-and-spaces'),
- # (r',?\s*\)', Keyword.Type, '#pop'), # ,) is a valid end of tuple, in (1,)
- # (r',', Keyword.Type, 'type-1'),
- # ],
- # 'type-record':[
- # include('comments-and-spaces'),
- # (r'\}', Keyword.Type, '#pop'),
- # (r'~?(?:\w+|`[^`]*`)', Keyword.Type, 'type-record-field-expr'),
- # ],
- # 'type-record-field-expr': [
- #
- # ],
-
- 'nested-comment': [
- (r'[^/*]+', Comment),
- (r'/\*', Comment, '#push'),
- (r'\*/', Comment, '#pop'),
- (r'[/*]', Comment),
- ],
-
- # the copy pasting between string and single-string
- # is kinda sad. Is there a way to avoid that??
- 'string': [
- (r'[^\\"{]+', String.Double),
- (r'"', String.Double, '#pop'),
- (r'\{', Operator, 'root'),
- include('escape-sequence'),
- ],
- 'single-string': [
- (r'[^\\\'{]+', String.Double),
- (r'\'', String.Double, '#pop'),
- (r'\{', Operator, 'root'),
- include('escape-sequence'),
- ],
-
- # all the html stuff
- # can't really reuse some existing html parser
- # because we must be able to parse embedded expressions
-
- # we are in this state after someone parsed the '<' that
- # started the html literal
- 'html-open-tag': [
- (r'[\w\-:]+', String.Single, ('#pop', 'html-attr')),
- (r'>', String.Single, ('#pop', 'html-content')),
- ],
-
- # we are in this state after someone parsed the '</' that
- # started the end of the closing tag
- 'html-end-tag': [
- # this is a star, because </> is allowed
- (r'[\w\-:]*>', String.Single, '#pop'),
- ],
-
- # we are in this state after having parsed '<ident(:ident)?'
- # we thus parse a possibly empty list of attributes
- 'html-attr': [
- (r'\s+', Text),
- (r'[\w\-:]+=', String.Single, 'html-attr-value'),
- (r'/>', String.Single, '#pop'),
- (r'>', String.Single, ('#pop', 'html-content')),
- ],
-
- 'html-attr-value': [
- (r"'", String.Single, ('#pop', 'single-string')),
- (r'"', String.Single, ('#pop', 'string')),
- (r'#'+ident_re, String.Single, '#pop'),
- (r'#(?=\{)', String.Single, ('#pop', 'root')),
- (r'[^"\'{`=<>]+', String.Single, '#pop'),
- (r'\{', Operator, ('#pop', 'root')), # this is a tail call!
- ],
-
- # we should probably deal with '\' escapes here
- 'html-content': [
- (r'<!--', Comment, 'html-comment'),
- (r'</', String.Single, ('#pop', 'html-end-tag')),
- (r'<', String.Single, 'html-open-tag'),
- (r'\{', Operator, 'root'),
- (r'[^<{]+', String.Single),
- ],
-
- 'html-comment': [
- (r'-->', Comment, '#pop'),
- (r'[^\-]+|-', Comment),
- ],
- }
-
-class ReasonLexer(RegexLexer):
- """
- For the ReasonML language (https://reasonml.github.io/).
-
- ..
versionadded:: 2.6 - """ - - name = 'ReasonML' - aliases = ['reason', "reasonml"] - filenames = ['*.re', '*.rei'] - mimetypes = ['text/x-reasonml'] - - keywords = ( - 'as', 'assert', 'begin', 'class', 'constraint', 'do', 'done', 'downto', - 'else', 'end', 'exception', 'external', 'false', 'for', 'fun', 'esfun', - 'function', 'functor', 'if', 'in', 'include', 'inherit', 'initializer', 'lazy', - 'let', 'switch', 'module', 'pub', 'mutable', 'new', 'nonrec', 'object', 'of', - 'open', 'pri', 'rec', 'sig', 'struct', 'then', 'to', 'true', 'try', - 'type', 'val', 'virtual', 'when', 'while', 'with' - ) - keyopts = ( - '!=', '#', '&', '&&', r'\(', r'\)', r'\*', r'\+', ',', '-', - r'-\.', '=>', r'\.', r'\.\.', r'\.\.\.', ':', '::', ':=', ':>', ';', ';;', '<', - '<-', '=', '>', '>]', r'>\}', r'\?', r'\?\?', r'\[', r'\[<', r'\[>', - r'\[\|', ']', '_', '`', r'\{', r'\{<', r'\|\|', r'\|', r'\|]', r'\}', '~' - ) - - operators = r'[!$%&*+\./:<=>?@^|~-]' - word_operators = ('and', 'asr', 'land', 'lor', 'lsl', 'lsr', 'lxor', 'mod', 'or') - prefix_syms = r'[!?~]' - infix_syms = r'[=<>@^|&+\*/$%-]' - primitives = ('unit', 'int', 'float', 'bool', 'string', 'char', 'list', 'array') - - tokens = { - 'escape-sequence': [ - (r'\\[\\"\'ntbr]', String.Escape), - (r'\\[0-9]{3}', String.Escape), - (r'\\x[0-9a-fA-F]{2}', String.Escape), - ], - 'root': [ - (r'\s+', Text), - (r'false|true|\(\)|\[\]', Name.Builtin.Pseudo), - (r'\b([A-Z][\w\']*)(?=\s*\.)', Name.Namespace, 'dotted'), - (r'\b([A-Z][\w\']*)', Name.Class), - (r'//.*?\n', Comment.Single), - (r'\/\*(?![\/])', Comment.Multiline, 'comment'), - (r'\b(%s)\b' % '|'.join(keywords), Keyword), - (r'(%s)' % '|'.join(keyopts[::-1]), Operator.Word), - (r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator), - (r'\b(%s)\b' % '|'.join(word_operators), Operator.Word), - (r'\b(%s)\b' % '|'.join(primitives), Keyword.Type), - - (r"[^\W\d][\w']*", Name), - - (r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)', Number.Float), - (r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex), - (r'0[oO][0-7][0-7_]*', Number.Oct), - (r'0[bB][01][01_]*', Number.Bin), - (r'\d[\d_]*', Number.Integer), - - (r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'", - String.Char), - (r"'.'", String.Char), - (r"'", Keyword), - - (r'"', String.Double, 'string'), - - (r'[~?][a-z][\w\']*:', Name.Variable), - ], - 'comment': [ - (r'[^\/*]+', Comment.Multiline), - (r'\/\*', Comment.Multiline, '#push'), - (r'\*\/', Comment.Multiline, '#pop'), - (r'[\*]', Comment.Multiline), - ], - 'string': [ - (r'[^\\"]+', String.Double), - include('escape-sequence'), - (r'\\\n', String.Double), - (r'"', String.Double, '#pop'), - ], - 'dotted': [ - (r'\s+', Text), - (r'\.', Punctuation), - (r'[A-Z][\w\']*(?=\s*\.)', Name.Namespace), - (r'[A-Z][\w\']*', Name.Class, '#pop'), - (r'[a-z_][\w\']*', Name, '#pop'), - default('#pop'), - ], - } +# -*- coding: utf-8 -*- +""" + pygments.lexers.ml + ~~~~~~~~~~~~~~~~~~ + + Lexers for ML family languages. + + :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, include, bygroups, default, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Error + +__all__ = ['SMLLexer', 'OcamlLexer', 'OpaLexer', 'ReasonLexer', 'FStarLexer'] + + +class SMLLexer(RegexLexer): + """ + For the Standard ML language. + + .. 
versionadded:: 1.5 + """ + + name = 'Standard ML' + aliases = ['sml'] + filenames = ['*.sml', '*.sig', '*.fun'] + mimetypes = ['text/x-standardml', 'application/x-standardml'] + + alphanumid_reserved = { + # Core + 'abstype', 'and', 'andalso', 'as', 'case', 'datatype', 'do', 'else', + 'end', 'exception', 'fn', 'fun', 'handle', 'if', 'in', 'infix', + 'infixr', 'let', 'local', 'nonfix', 'of', 'op', 'open', 'orelse', + 'raise', 'rec', 'then', 'type', 'val', 'with', 'withtype', 'while', + # Modules + 'eqtype', 'functor', 'include', 'sharing', 'sig', 'signature', + 'struct', 'structure', 'where', + } + + symbolicid_reserved = { + # Core + ':', r'\|', '=', '=>', '->', '#', + # Modules + ':>', + } + + nonid_reserved = {'(', ')', '[', ']', '{', '}', ',', ';', '...', '_'} + + alphanumid_re = r"[a-zA-Z][\w']*" + symbolicid_re = r"[!%&$#+\-/:<=>?@\\~`^|*]+" + + # A character constant is a sequence of the form #s, where s is a string + # constant denoting a string of size one character. This setup just parses + # the entire string as either a String.Double or a String.Char (depending + # on the argument), even if the String.Char is an erronous + # multiple-character string. + def stringy(whatkind): + return [ + (r'[^"\\]', whatkind), + (r'\\[\\"abtnvfr]', String.Escape), + # Control-character notation is used for codes < 32, + # where \^@ == \000 + (r'\\\^[\x40-\x5e]', String.Escape), + # Docs say 'decimal digits' + (r'\\[0-9]{3}', String.Escape), + (r'\\u[0-9a-fA-F]{4}', String.Escape), + (r'\\\s+\\', String.Interpol), + (r'"', whatkind, '#pop'), + ] + + # Callbacks for distinguishing tokens and reserved words + def long_id_callback(self, match): + if match.group(1) in self.alphanumid_reserved: + token = Error + else: + token = Name.Namespace + yield match.start(1), token, match.group(1) + yield match.start(2), Punctuation, match.group(2) + + def end_id_callback(self, match): + if match.group(1) in self.alphanumid_reserved: + token = Error + elif match.group(1) in self.symbolicid_reserved: + token = Error + else: + token = Name + yield match.start(1), token, match.group(1) + + def id_callback(self, match): + str = match.group(1) + if str in self.alphanumid_reserved: + token = Keyword.Reserved + elif str in self.symbolicid_reserved: + token = Punctuation + else: + token = Name + yield match.start(1), token, str + + tokens = { + # Whitespace and comments are (almost) everywhere + 'whitespace': [ + (r'\s+', Text), + (r'\(\*', Comment.Multiline, 'comment'), + ], + + 'delimiters': [ + # This lexer treats these delimiters specially: + # Delimiters define scopes, and the scope is how the meaning of + # the `|' is resolved - is it a case/handle expression, or function + # definition by cases? 
(This is not how the Definition works, but + # it's how MLton behaves, see http://mlton.org/SMLNJDeviations) + (r'\(|\[|\{', Punctuation, 'main'), + (r'\)|\]|\}', Punctuation, '#pop'), + (r'\b(let|if|local)\b(?!\')', Keyword.Reserved, ('main', 'main')), + (r'\b(struct|sig|while)\b(?!\')', Keyword.Reserved, 'main'), + (r'\b(do|else|end|in|then)\b(?!\')', Keyword.Reserved, '#pop'), + ], + + 'core': [ + # Punctuation that doesn't overlap symbolic identifiers + (r'(%s)' % '|'.join(re.escape(z) for z in nonid_reserved), + Punctuation), + + # Special constants: strings, floats, numbers in decimal and hex + (r'#"', String.Char, 'char'), + (r'"', String.Double, 'string'), + (r'~?0x[0-9a-fA-F]+', Number.Hex), + (r'0wx[0-9a-fA-F]+', Number.Hex), + (r'0w\d+', Number.Integer), + (r'~?\d+\.\d+[eE]~?\d+', Number.Float), + (r'~?\d+\.\d+', Number.Float), + (r'~?\d+[eE]~?\d+', Number.Float), + (r'~?\d+', Number.Integer), + + # Labels + (r'#\s*[1-9][0-9]*', Name.Label), + (r'#\s*(%s)' % alphanumid_re, Name.Label), + (r'#\s+(%s)' % symbolicid_re, Name.Label), + # Some reserved words trigger a special, local lexer state change + (r'\b(datatype|abstype)\b(?!\')', Keyword.Reserved, 'dname'), + (r'(?=\b(exception)\b(?!\'))', Text, ('ename')), + (r'\b(functor|include|open|signature|structure)\b(?!\')', + Keyword.Reserved, 'sname'), + (r'\b(type|eqtype)\b(?!\')', Keyword.Reserved, 'tname'), + + # Regular identifiers, long and otherwise + (r'\'[\w\']*', Name.Decorator), + (r'(%s)(\.)' % alphanumid_re, long_id_callback, "dotted"), + (r'(%s)' % alphanumid_re, id_callback), + (r'(%s)' % symbolicid_re, id_callback), + ], + 'dotted': [ + (r'(%s)(\.)' % alphanumid_re, long_id_callback), + (r'(%s)' % alphanumid_re, end_id_callback, "#pop"), + (r'(%s)' % symbolicid_re, end_id_callback, "#pop"), + (r'\s+', Error), + (r'\S+', Error), + ], + + + # Main parser (prevents errors in files that have scoping errors) + 'root': [ + default('main') + ], + + # In this scope, I expect '|' to not be followed by a function name, + # and I expect 'and' to be followed by a binding site + 'main': [ + include('whitespace'), + + # Special behavior of val/and/fun + (r'\b(val|and)\b(?!\')', Keyword.Reserved, 'vname'), + (r'\b(fun)\b(?!\')', Keyword.Reserved, + ('#pop', 'main-fun', 'fname')), + + include('delimiters'), + include('core'), + (r'\S+', Error), + ], + + # In this scope, I expect '|' and 'and' to be followed by a function + 'main-fun': [ + include('whitespace'), + + (r'\s', Text), + (r'\(\*', Comment.Multiline, 'comment'), + + # Special behavior of val/and/fun + (r'\b(fun|and)\b(?!\')', Keyword.Reserved, 'fname'), + (r'\b(val)\b(?!\')', Keyword.Reserved, + ('#pop', 'main', 'vname')), + + # Special behavior of '|' and '|'-manipulating keywords + (r'\|', Punctuation, 'fname'), + (r'\b(case|handle)\b(?!\')', Keyword.Reserved, + ('#pop', 'main')), + + include('delimiters'), + include('core'), + (r'\S+', Error), + ], + + # Character and string parsers + 'char': stringy(String.Char), + 'string': stringy(String.Double), + + 'breakout': [ + (r'(?=\b(%s)\b(?!\'))' % '|'.join(alphanumid_reserved), Text, '#pop'), + ], + + # Dealing with what comes after module system keywords + 'sname': [ + include('whitespace'), + include('breakout'), + + (r'(%s)' % alphanumid_re, Name.Namespace), + default('#pop'), + ], + + # Dealing with what comes after the 'fun' (or 'and' or '|') keyword + 'fname': [ + include('whitespace'), + (r'\'[\w\']*', Name.Decorator), + (r'\(', Punctuation, 'tyvarseq'), + + (r'(%s)' % alphanumid_re, Name.Function, '#pop'), + (r'(%s)' 
% symbolicid_re, Name.Function, '#pop'), + + # Ignore interesting function declarations like "fun (x + y) = ..." + default('#pop'), + ], + + # Dealing with what comes after the 'val' (or 'and') keyword + 'vname': [ + include('whitespace'), + (r'\'[\w\']*', Name.Decorator), + (r'\(', Punctuation, 'tyvarseq'), + + (r'(%s)(\s*)(=(?!%s))' % (alphanumid_re, symbolicid_re), + bygroups(Name.Variable, Text, Punctuation), '#pop'), + (r'(%s)(\s*)(=(?!%s))' % (symbolicid_re, symbolicid_re), + bygroups(Name.Variable, Text, Punctuation), '#pop'), + (r'(%s)' % alphanumid_re, Name.Variable, '#pop'), + (r'(%s)' % symbolicid_re, Name.Variable, '#pop'), + + # Ignore interesting patterns like 'val (x, y)' + default('#pop'), + ], + + # Dealing with what comes after the 'type' (or 'and') keyword + 'tname': [ + include('whitespace'), + include('breakout'), + + (r'\'[\w\']*', Name.Decorator), + (r'\(', Punctuation, 'tyvarseq'), + (r'=(?!%s)' % symbolicid_re, Punctuation, ('#pop', 'typbind')), + + (r'(%s)' % alphanumid_re, Keyword.Type), + (r'(%s)' % symbolicid_re, Keyword.Type), + (r'\S+', Error, '#pop'), + ], + + # A type binding includes most identifiers + 'typbind': [ + include('whitespace'), + + (r'\b(and)\b(?!\')', Keyword.Reserved, ('#pop', 'tname')), + + include('breakout'), + include('core'), + (r'\S+', Error, '#pop'), + ], + + # Dealing with what comes after the 'datatype' (or 'and') keyword + 'dname': [ + include('whitespace'), + include('breakout'), + + (r'\'[\w\']*', Name.Decorator), + (r'\(', Punctuation, 'tyvarseq'), + (r'(=)(\s*)(datatype)', + bygroups(Punctuation, Text, Keyword.Reserved), '#pop'), + (r'=(?!%s)' % symbolicid_re, Punctuation, + ('#pop', 'datbind', 'datcon')), + + (r'(%s)' % alphanumid_re, Keyword.Type), + (r'(%s)' % symbolicid_re, Keyword.Type), + (r'\S+', Error, '#pop'), + ], + + # common case - A | B | C of int + 'datbind': [ + include('whitespace'), + + (r'\b(and)\b(?!\')', Keyword.Reserved, ('#pop', 'dname')), + (r'\b(withtype)\b(?!\')', Keyword.Reserved, ('#pop', 'tname')), + (r'\b(of)\b(?!\')', Keyword.Reserved), + + (r'(\|)(\s*)(%s)' % alphanumid_re, + bygroups(Punctuation, Text, Name.Class)), + (r'(\|)(\s+)(%s)' % symbolicid_re, + bygroups(Punctuation, Text, Name.Class)), + + include('breakout'), + include('core'), + (r'\S+', Error), + ], + + # Dealing with what comes after an exception + 'ename': [ + include('whitespace'), + + (r'(exception|and)\b(\s+)(%s)' % alphanumid_re, + bygroups(Keyword.Reserved, Text, Name.Class)), + (r'(exception|and)\b(\s*)(%s)' % symbolicid_re, + bygroups(Keyword.Reserved, Text, Name.Class)), + (r'\b(of)\b(?!\')', Keyword.Reserved), + + include('breakout'), + include('core'), + (r'\S+', Error), + ], + + 'datcon': [ + include('whitespace'), + (r'(%s)' % alphanumid_re, Name.Class, '#pop'), + (r'(%s)' % symbolicid_re, Name.Class, '#pop'), + (r'\S+', Error, '#pop'), + ], + + # Series of type variables + 'tyvarseq': [ + (r'\s', Text), + (r'\(\*', Comment.Multiline, 'comment'), + + (r'\'[\w\']*', Name.Decorator), + (alphanumid_re, Name), + (r',', Punctuation), + (r'\)', Punctuation, '#pop'), + (symbolicid_re, Name), + ], + + 'comment': [ + (r'[^(*)]', Comment.Multiline), + (r'\(\*', Comment.Multiline, '#push'), + (r'\*\)', Comment.Multiline, '#pop'), + (r'[(*)]', Comment.Multiline), + ], + } + + +class OcamlLexer(RegexLexer): + """ + For the OCaml language. + + .. 
versionadded:: 0.7 + """ + + name = 'OCaml' + aliases = ['ocaml'] + filenames = ['*.ml', '*.mli', '*.mll', '*.mly'] + mimetypes = ['text/x-ocaml'] + + keywords = ( + 'as', 'assert', 'begin', 'class', 'constraint', 'do', 'done', + 'downto', 'else', 'end', 'exception', 'external', 'false', + 'for', 'fun', 'function', 'functor', 'if', 'in', 'include', + 'inherit', 'initializer', 'lazy', 'let', 'match', 'method', + 'module', 'mutable', 'new', 'object', 'of', 'open', 'private', + 'raise', 'rec', 'sig', 'struct', 'then', 'to', 'true', 'try', + 'type', 'value', 'val', 'virtual', 'when', 'while', 'with', + ) + keyopts = ( + '!=', '#', '&', '&&', r'\(', r'\)', r'\*', r'\+', ',', '-', + r'-\.', '->', r'\.', r'\.\.', ':', '::', ':=', ':>', ';', ';;', '<', + '<-', '=', '>', '>]', r'>\}', r'\?', r'\?\?', r'\[', r'\[<', r'\[>', + r'\[\|', ']', '_', '`', r'\{', r'\{<', r'\|', r'\|]', r'\}', '~' + ) + + operators = r'[!$%&*+\./:<=>?@^|~-]' + word_operators = ('and', 'asr', 'land', 'lor', 'lsl', 'lxor', 'mod', 'or') + prefix_syms = r'[!?~]' + infix_syms = r'[=<>@^|&+\*/$%-]' + primitives = ('unit', 'int', 'float', 'bool', 'string', 'char', 'list', 'array') + + tokens = { + 'escape-sequence': [ + (r'\\[\\"\'ntbr]', String.Escape), + (r'\\[0-9]{3}', String.Escape), + (r'\\x[0-9a-fA-F]{2}', String.Escape), + ], + 'root': [ + (r'\s+', Text), + (r'false|true|\(\)|\[\]', Name.Builtin.Pseudo), + (r'\b([A-Z][\w\']*)(?=\s*\.)', Name.Namespace, 'dotted'), + (r'\b([A-Z][\w\']*)', Name.Class), + (r'\(\*(?![)])', Comment, 'comment'), + (r'\b(%s)\b' % '|'.join(keywords), Keyword), + (r'(%s)' % '|'.join(keyopts[::-1]), Operator), + (r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator), + (r'\b(%s)\b' % '|'.join(word_operators), Operator.Word), + (r'\b(%s)\b' % '|'.join(primitives), Keyword.Type), + + (r"[^\W\d][\w']*", Name), + + (r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)', Number.Float), + (r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex), + (r'0[oO][0-7][0-7_]*', Number.Oct), + (r'0[bB][01][01_]*', Number.Bin), + (r'\d[\d_]*', Number.Integer), + + (r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'", + String.Char), + (r"'.'", String.Char), + (r"'", Keyword), # a stray quote is another syntax element + + (r'"', String.Double, 'string'), + + (r'[~?][a-z][\w\']*:', Name.Variable), + ], + 'comment': [ + (r'[^(*)]+', Comment), + (r'\(\*', Comment, '#push'), + (r'\*\)', Comment, '#pop'), + (r'[(*)]', Comment), + ], + 'string': [ + (r'[^\\"]+', String.Double), + include('escape-sequence'), + (r'\\\n', String.Double), + (r'"', String.Double, '#pop'), + ], + 'dotted': [ + (r'\s+', Text), + (r'\.', Punctuation), + (r'[A-Z][\w\']*(?=\s*\.)', Name.Namespace), + (r'[A-Z][\w\']*', Name.Class, '#pop'), + (r'[a-z_][\w\']*', Name, '#pop'), + default('#pop'), + ], + } + +class OpaLexer(RegexLexer): + """ + Lexer for the Opa language (http://opalang.org). + + .. 
versionadded:: 1.5
+ """
+
+ name = 'Opa'
+ aliases = ['opa']
+ filenames = ['*.opa']
+ mimetypes = ['text/x-opa']
+
+ # most of these aren't strictly keywords
+ # but if you color only real keywords, you might just
+ # as well not color anything
+ keywords = (
+ 'and', 'as', 'begin', 'case', 'client', 'css', 'database', 'db', 'do',
+ 'else', 'end', 'external', 'forall', 'function', 'if', 'import',
+ 'match', 'module', 'or', 'package', 'parser', 'rec', 'server', 'then',
+ 'type', 'val', 'with', 'xml_parser',
+ )
+
+ # matches both stuff and `stuff`
+ ident_re = r'(([a-zA-Z_]\w*)|(`[^`]*`))'
+
+ op_re = r'[.=\-<>,@~%/+?*&^!]'
+ punc_re = r'[()\[\],;|]' # '{' and '}' are treated elsewhere
+ # because they are also used for inserts
+
+ tokens = {
+ # copied from the caml lexer, should be adapted
+ 'escape-sequence': [
+ (r'\\[\\"\'ntr}]', String.Escape),
+ (r'\\[0-9]{3}', String.Escape),
+ (r'\\x[0-9a-fA-F]{2}', String.Escape),
+ ],
+
+ # factorizing these rules, because they are inserted many times
+ 'comments': [
+ (r'/\*', Comment, 'nested-comment'),
+ (r'//.*?$', Comment),
+ ],
+ 'comments-and-spaces': [
+ include('comments'),
+ (r'\s+', Text),
+ ],
+
+ 'root': [
+ include('comments-and-spaces'),
+ # keywords
+ (words(keywords, prefix=r'\b', suffix=r'\b'), Keyword),
+ # directives
+ # we could parse the actual set of directives instead of anything
+ # starting with @, but this is troublesome
+ # because it needs to be adjusted all the time
+ # and assuming we parse only sources that compile, it is useless
+ (r'@' + ident_re + r'\b', Name.Builtin.Pseudo),
+
+ # number literals
+ (r'-?.[\d]+([eE][+\-]?\d+)', Number.Float),
+ (r'-?\d+.\d*([eE][+\-]?\d+)', Number.Float),
+ (r'-?\d+[eE][+\-]?\d+', Number.Float),
+ (r'0[xX][\da-fA-F]+', Number.Hex),
+ (r'0[oO][0-7]+', Number.Oct),
+ (r'0[bB][01]+', Number.Bin),
+ (r'\d+', Number.Integer),
+ # color literals
+ (r'#[\da-fA-F]{3,6}', Number.Integer),
+
+ # string literals
+ (r'"', String.Double, 'string'),
+ # char literal, should be checked because this is the regexp from
+ # the caml lexer
+ (r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2})|.)'",
+ String.Char),
+
+ # this is meant to deal with embedded exprs in strings
+ # every time we find a '}' we pop a state so that if we were
+ # inside a string, we are back in the string state
+ # as a consequence, we must also push a state every time we find a
+ # '{' or else we will have errors when parsing {} for instance
+ (r'\{', Operator, '#push'),
+ (r'\}', Operator, '#pop'),
+
+ # html literals
+ # this is a much more strict that the actual parser,
+ # since a<b would not be parsed as html
+ # but then again, the parser is way too lax, and we can't hope
+ # to have something correct, so...
+ (r'<(?=[a-zA-Z>])', String.Single, 'html-open-tag'),
+
+ # db path
+ # matching the '[_]' in '/a[_]' because it is a part
+ # of the syntax of the db path definition
+ # unfortunately, i don't know how to match the ']' in
+ # /a[1], so this is somewhat inconsistent
+ (r'[@?!]?(/\w+)+(\[_\])?', Name.Variable),
+ # putting the same color on <- as on db path, since
+ # it can be used only to mean Db.write
+ (r'<-(?!'+op_re+r')', Name.Variable),
+
+ # 'modules'
+ # although modules are not distinguished by their names as in caml
+ # the standard library seems to follow the convention that modules
+ # only area capitalized
+ (r'\b([A-Z]\w*)(?=\.)', Name.Namespace),
+
+ # operators
+ # = has a special role because this is the only
+ # way to syntactic distinguish binding constructions
+ # unfortunately, this colors the equal in {x=2} too
+ (r'=(?!'+op_re+r')', Keyword),
+ (r'(%s)+' % op_re, Operator),
+ (r'(%s)+' % punc_re, Operator),
+
+ # coercions
+
(r':', Operator, 'type'), + # type variables + # we need this rule because we don't parse specially type + # definitions so in "type t('a) = ...", "'a" is parsed by 'root' + ("'"+ident_re, Keyword.Type), + + # id literal, #something, or #{expr} + (r'#'+ident_re, String.Single), + (r'#(?=\{)', String.Single), + + # identifiers + # this avoids to color '2' in 'a2' as an integer + (ident_re, Text), + + # default, not sure if that is needed or not + # (r'.', Text), + ], + + # it is quite painful to have to parse types to know where they end + # this is the general rule for a type + # a type is either: + # * -> ty + # * type-with-slash + # * type-with-slash -> ty + # * type-with-slash (, type-with-slash)+ -> ty + # + # the code is pretty funky in here, but this code would roughly + # translate in caml to: + # let rec type stream = + # match stream with + # | [< "->"; stream >] -> type stream + # | [< ""; stream >] -> + # type_with_slash stream + # type_lhs_1 stream; + # and type_1 stream = ... + 'type': [ + include('comments-and-spaces'), + (r'->', Keyword.Type), + default(('#pop', 'type-lhs-1', 'type-with-slash')), + ], + + # parses all the atomic or closed constructions in the syntax of type + # expressions: record types, tuple types, type constructors, basic type + # and type variables + 'type-1': [ + include('comments-and-spaces'), + (r'\(', Keyword.Type, ('#pop', 'type-tuple')), + (r'~?\{', Keyword.Type, ('#pop', 'type-record')), + (ident_re+r'\(', Keyword.Type, ('#pop', 'type-tuple')), + (ident_re, Keyword.Type, '#pop'), + ("'"+ident_re, Keyword.Type), + # this case is not in the syntax but sometimes + # we think we are parsing types when in fact we are parsing + # some css, so we just pop the states until we get back into + # the root state + default('#pop'), + ], + + # type-with-slash is either: + # * type-1 + # * type-1 (/ type-1)+ + 'type-with-slash': [ + include('comments-and-spaces'), + default(('#pop', 'slash-type-1', 'type-1')), + ], + 'slash-type-1': [ + include('comments-and-spaces'), + ('/', Keyword.Type, ('#pop', 'type-1')), + # same remark as above + default('#pop'), + ], + + # we go in this state after having parsed a type-with-slash + # while trying to parse a type + # and at this point we must determine if we are parsing an arrow + # type (in which case we must continue parsing) or not (in which + # case we stop) + 'type-lhs-1': [ + include('comments-and-spaces'), + (r'->', Keyword.Type, ('#pop', 'type')), + (r'(?=,)', Keyword.Type, ('#pop', 'type-arrow')), + default('#pop'), + ], + 'type-arrow': [ + include('comments-and-spaces'), + # the look ahead here allows to parse f(x : int, y : float -> truc) + # correctly + (r',(?=[^:]*?->)', Keyword.Type, 'type-with-slash'), + (r'->', Keyword.Type, ('#pop', 'type')), + # same remark as above + default('#pop'), + ], + + # no need to do precise parsing for tuples and records + # because they are closed constructions, so we can simply + # find the closing delimiter + # note that this function would be not work if the source + # contained identifiers like `{)` (although it could be patched + # to support it) + 'type-tuple': [ + include('comments-and-spaces'), + (r'[^()/*]+', Keyword.Type), + (r'[/*]', Keyword.Type), + (r'\(', Keyword.Type, '#push'), + (r'\)', Keyword.Type, '#pop'), + ], + 'type-record': [ + include('comments-and-spaces'), + (r'[^{}/*]+', Keyword.Type), + (r'[/*]', Keyword.Type), + (r'\{', Keyword.Type, '#push'), + (r'\}', Keyword.Type, '#pop'), + ], + + # 'type-tuple': [ + # include('comments-and-spaces'), + # 
(r'\)', Keyword.Type, '#pop'),
+ # default(('#pop', 'type-tuple-1', 'type-1')),
+ # ],
+ # 'type-tuple-1': [
+ # include('comments-and-spaces'),
+ # (r',?\s*\)', Keyword.Type, '#pop'), # ,) is a valid end of tuple, in (1,)
+ # (r',', Keyword.Type, 'type-1'),
+ # ],
+ # 'type-record':[
+ # include('comments-and-spaces'),
+ # (r'\}', Keyword.Type, '#pop'),
+ # (r'~?(?:\w+|`[^`]*`)', Keyword.Type, 'type-record-field-expr'),
+ # ],
+ # 'type-record-field-expr': [
+ #
+ # ],
+
+ 'nested-comment': [
+ (r'[^/*]+', Comment),
+ (r'/\*', Comment, '#push'),
+ (r'\*/', Comment, '#pop'),
+ (r'[/*]', Comment),
+ ],
+
+ # the copy pasting between string and single-string
+ # is kinda sad. Is there a way to avoid that??
+ 'string': [
+ (r'[^\\"{]+', String.Double),
+ (r'"', String.Double, '#pop'),
+ (r'\{', Operator, 'root'),
+ include('escape-sequence'),
+ ],
+ 'single-string': [
+ (r'[^\\\'{]+', String.Double),
+ (r'\'', String.Double, '#pop'),
+ (r'\{', Operator, 'root'),
+ include('escape-sequence'),
+ ],
+
+ # all the html stuff
+ # can't really reuse some existing html parser
+ # because we must be able to parse embedded expressions
+
+ # we are in this state after someone parsed the '<' that
+ # started the html literal
+ 'html-open-tag': [
+ (r'[\w\-:]+', String.Single, ('#pop', 'html-attr')),
+ (r'>', String.Single, ('#pop', 'html-content')),
+ ],
+
+ # we are in this state after someone parsed the '</' that
+ # started the end of the closing tag
+ 'html-end-tag': [
+ # this is a star, because </> is allowed
+ (r'[\w\-:]*>', String.Single, '#pop'),
+ ],
+
+ # we are in this state after having parsed '<ident(:ident)?'
+ # we thus parse a possibly empty list of attributes
+ 'html-attr': [
+ (r'\s+', Text),
+ (r'[\w\-:]+=', String.Single, 'html-attr-value'),
+ (r'/>', String.Single, '#pop'),
+ (r'>', String.Single, ('#pop', 'html-content')),
+ ],
+
+ 'html-attr-value': [
+ (r"'", String.Single, ('#pop', 'single-string')),
+ (r'"', String.Single, ('#pop', 'string')),
+ (r'#'+ident_re, String.Single, '#pop'),
+ (r'#(?=\{)', String.Single, ('#pop', 'root')),
+ (r'[^"\'{`=<>]+', String.Single, '#pop'),
+ (r'\{', Operator, ('#pop', 'root')), # this is a tail call!
+ ],
+
+ # we should probably deal with '\' escapes here
+ 'html-content': [
+ (r'<!--', Comment, 'html-comment'),
+ (r'</', String.Single, ('#pop', 'html-end-tag')),
+ (r'<', String.Single, 'html-open-tag'),
+ (r'\{', Operator, 'root'),
+ (r'[^<{]+', String.Single),
+ ],
+
+ 'html-comment': [
+ (r'-->', Comment, '#pop'),
+ (r'[^\-]+|-', Comment),
+ ],
+ }
+
+
+class ReasonLexer(RegexLexer):
+ """
+ For the ReasonML language (https://reasonml.github.io/).
+
+ ..
versionadded:: 2.6 + """ + + name = 'ReasonML' + aliases = ['reason', "reasonml"] + filenames = ['*.re', '*.rei'] + mimetypes = ['text/x-reasonml'] + + keywords = ( + 'as', 'assert', 'begin', 'class', 'constraint', 'do', 'done', 'downto', + 'else', 'end', 'exception', 'external', 'false', 'for', 'fun', 'esfun', + 'function', 'functor', 'if', 'in', 'include', 'inherit', 'initializer', 'lazy', + 'let', 'switch', 'module', 'pub', 'mutable', 'new', 'nonrec', 'object', 'of', + 'open', 'pri', 'rec', 'sig', 'struct', 'then', 'to', 'true', 'try', + 'type', 'val', 'virtual', 'when', 'while', 'with', + ) + keyopts = ( + '!=', '#', '&', '&&', r'\(', r'\)', r'\*', r'\+', ',', '-', + r'-\.', '=>', r'\.', r'\.\.', r'\.\.\.', ':', '::', ':=', ':>', ';', ';;', '<', + '<-', '=', '>', '>]', r'>\}', r'\?', r'\?\?', r'\[', r'\[<', r'\[>', + r'\[\|', ']', '_', '`', r'\{', r'\{<', r'\|', r'\|\|', r'\|]', r'\}', '~' + ) + + operators = r'[!$%&*+\./:<=>?@^|~-]' + word_operators = ('and', 'asr', 'land', 'lor', 'lsl', 'lsr', 'lxor', 'mod', 'or') + prefix_syms = r'[!?~]' + infix_syms = r'[=<>@^|&+\*/$%-]' + primitives = ('unit', 'int', 'float', 'bool', 'string', 'char', 'list', 'array') + + tokens = { + 'escape-sequence': [ + (r'\\[\\"\'ntbr]', String.Escape), + (r'\\[0-9]{3}', String.Escape), + (r'\\x[0-9a-fA-F]{2}', String.Escape), + ], + 'root': [ + (r'\s+', Text), + (r'false|true|\(\)|\[\]', Name.Builtin.Pseudo), + (r'\b([A-Z][\w\']*)(?=\s*\.)', Name.Namespace, 'dotted'), + (r'\b([A-Z][\w\']*)', Name.Class), + (r'//.*?\n', Comment.Single), + (r'\/\*(?!/)', Comment.Multiline, 'comment'), + (r'\b(%s)\b' % '|'.join(keywords), Keyword), + (r'(%s)' % '|'.join(keyopts[::-1]), Operator.Word), + (r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator), + (r'\b(%s)\b' % '|'.join(word_operators), Operator.Word), + (r'\b(%s)\b' % '|'.join(primitives), Keyword.Type), + + (r"[^\W\d][\w']*", Name), + + (r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)', Number.Float), + (r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex), + (r'0[oO][0-7][0-7_]*', Number.Oct), + (r'0[bB][01][01_]*', Number.Bin), + (r'\d[\d_]*', Number.Integer), + + (r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'", + String.Char), + (r"'.'", String.Char), + (r"'", Keyword), + + (r'"', String.Double, 'string'), + + (r'[~?][a-z][\w\']*:', Name.Variable), + ], + 'comment': [ + (r'[^/*]+', Comment.Multiline), + (r'\/\*', Comment.Multiline, '#push'), + (r'\*\/', Comment.Multiline, '#pop'), + (r'\*', Comment.Multiline), + ], + 'string': [ + (r'[^\\"]+', String.Double), + include('escape-sequence'), + (r'\\\n', String.Double), + (r'"', String.Double, '#pop'), + ], + 'dotted': [ + (r'\s+', Text), + (r'\.', Punctuation), + (r'[A-Z][\w\']*(?=\s*\.)', Name.Namespace), + (r'[A-Z][\w\']*', Name.Class, '#pop'), + (r'[a-z_][\w\']*', Name, '#pop'), + default('#pop'), + ], + } + + +class FStarLexer(RegexLexer): + """ + For the F* language (https://www.fstar-lang.org/). + .. 
versionadded:: 2.7 + """ + + name = 'FStar' + aliases = ['fstar'] + filenames = ['*.fst', '*.fsti'] + mimetypes = ['text/x-fstar'] + + keywords = ( + 'abstract', 'attributes', 'noeq', 'unopteq', 'and' + 'begin', 'by', 'default', 'effect', 'else', 'end', 'ensures', + 'exception', 'exists', 'false', 'forall', 'fun', 'function', 'if', + 'in', 'include', 'inline', 'inline_for_extraction', 'irreducible', + 'logic', 'match', 'module', 'mutable', 'new', 'new_effect', 'noextract', + 'of', 'open', 'opaque', 'private', 'range_of', 'reifiable', + 'reify', 'reflectable', 'requires', 'set_range_of', 'sub_effect', + 'synth', 'then', 'total', 'true', 'try', 'type', 'unfold', 'unfoldable', + 'val', 'when', 'with', 'not' + ) + decl_keywords = ('let', 'rec') + assume_keywords = ('assume', 'admit', 'assert', 'calc') + keyopts = ( + r'~', r'-', r'/\\', r'\\/', r'<:', r'<@', r'\(\|', r'\|\)', r'#', r'u#', + r'&', r'\(', r'\)', r'\(\)', r',', r'~>', r'->', r'<-', r'<--', r'<==>', + r'==>', r'\.', r'\?', r'\?\.', r'\.\[', r'\.\(', r'\.\(\|', r'\.\[\|', + r'\{:pattern', r':', r'::', r':=', r';', r';;', r'=', r'%\[', r'!\{', + r'\[', r'\[@', r'\[\|', r'\|>', r'\]', r'\|\]', r'\{', r'\|', r'\}', r'\$' + ) + + operators = r'[!$%&*+\./:<=>?@^|~-]' + prefix_syms = r'[!?~]' + infix_syms = r'[=<>@^|&+\*/$%-]' + primitives = ('unit', 'int', 'float', 'bool', 'string', 'char', 'list', 'array') + + tokens = { + 'escape-sequence': [ + (r'\\[\\"\'ntbr]', String.Escape), + (r'\\[0-9]{3}', String.Escape), + (r'\\x[0-9a-fA-F]{2}', String.Escape), + ], + 'root': [ + (r'\s+', Text), + (r'false|true|False|True|\(\)|\[\]', Name.Builtin.Pseudo), + (r'\b([A-Z][\w\']*)(?=\s*\.)', Name.Namespace, 'dotted'), + (r'\b([A-Z][\w\']*)', Name.Class), + (r'\(\*(?![)])', Comment, 'comment'), + (r'^\/\/.+$', Comment), + (r'\b(%s)\b' % '|'.join(keywords), Keyword), + (r'\b(%s)\b' % '|'.join(assume_keywords), Name.Exception), + (r'\b(%s)\b' % '|'.join(decl_keywords), Keyword.Declaration), + (r'(%s)' % '|'.join(keyopts[::-1]), Operator), + (r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator), + (r'\b(%s)\b' % '|'.join(primitives), Keyword.Type), + + (r"[^\W\d][\w']*", Name), + + (r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)', Number.Float), + (r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex), + (r'0[oO][0-7][0-7_]*', Number.Oct), + (r'0[bB][01][01_]*', Number.Bin), + (r'\d[\d_]*', Number.Integer), + + (r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'", + String.Char), + (r"'.'", String.Char), + (r"'", Keyword), # a stray quote is another syntax element + (r"\`([\w\'.]+)\`", Operator.Word), # for infix applications + (r"\`", Keyword), # for quoting + (r'"', String.Double, 'string'), + + (r'[~?][a-z][\w\']*:', Name.Variable), + ], + 'comment': [ + (r'[^(*)]+', Comment), + (r'\(\*', Comment, '#push'), + (r'\*\)', Comment, '#pop'), + (r'[(*)]', Comment), + ], + 'string': [ + (r'[^\\"]+', String.Double), + include('escape-sequence'), + (r'\\\n', String.Double), + (r'"', String.Double, '#pop'), + ], + 'dotted': [ + (r'\s+', Text), + (r'\.', Punctuation), + (r'[A-Z][\w\']*(?=\s*\.)', Name.Namespace), + (r'[A-Z][\w\']*', Name.Class, '#pop'), + (r'[a-z_][\w\']*', Name, '#pop'), + default('#pop'), + ], + } diff --git a/pygments/lexers/modeling.py b/pygments/lexers/modeling.py old mode 100644 new mode 100755 index f4dca4a..7150141 --- a/pygments/lexers/modeling.py +++ b/pygments/lexers/modeling.py @@ -1,366 +1,366 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.modeling - ~~~~~~~~~~~~~~~~~~~~~~~~ - - Lexers for modeling languages. 
-
- :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
- :license: BSD, see LICENSE for details.
- """
-
-import re
-
-from pygments.lexer import RegexLexer, include, bygroups, using, default
-from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
- Number, Punctuation, Whitespace
-
-from pygments.lexers.html import HtmlLexer
-from pygments.lexers import _stan_builtins
-
-__all__ = ['ModelicaLexer', 'BugsLexer', 'JagsLexer', 'StanLexer']
-
-
-class ModelicaLexer(RegexLexer):
- """
- For `Modelica <http://www.modelica.org/>`_ source code.
-
- .. versionadded:: 1.1
- """
- name = 'Modelica'
- aliases = ['modelica']
- filenames = ['*.mo']
- mimetypes = ['text/x-modelica']
-
- flags = re.DOTALL | re.MULTILINE
-
- _name = r"(?:'(?:[^\\']|\\.)+'|[a-zA-Z_]\w*)"
-
- tokens = {
- 'whitespace': [
- (u'[\\s\ufeff]+', Text),
- (r'//[^\n]*\n?', Comment.Single),
- (r'/\*.*?\*/', Comment.Multiline)
- ],
- 'root': [
- include('whitespace'),
- (r'"', String.Double, 'string'),
- (r'[()\[\]{},;]+', Punctuation),
- (r'\.?[*^/+-]|\.|<>|[<>:=]=?', Operator),
- (r'\d+(\.?\d*[eE][-+]?\d+|\.\d*)', Number.Float),
- (r'\d+', Number.Integer),
- (r'(abs|acos|actualStream|array|asin|assert|AssertionLevel|atan|'
- r'atan2|backSample|Boolean|cardinality|cat|ceil|change|Clock|'
- r'Connections|cos|cosh|cross|delay|diagonal|div|edge|exp|'
- r'ExternalObject|fill|floor|getInstanceName|hold|homotopy|'
- r'identity|inStream|integer|Integer|interval|inverse|isPresent|'
- r'linspace|log|log10|matrix|max|min|mod|ndims|noClock|noEvent|'
- r'ones|outerProduct|pre|previous|product|Real|reinit|rem|rooted|'
- r'sample|scalar|semiLinear|shiftSample|sign|sin|sinh|size|skew|'
- r'smooth|spatialDistribution|sqrt|StateSelect|String|subSample|'
- r'sum|superSample|symmetric|tan|tanh|terminal|terminate|time|'
- r'transpose|vector|zeros)\b', Name.Builtin),
- (r'(algorithm|annotation|break|connect|constant|constrainedby|der|'
- r'discrete|each|else|elseif|elsewhen|encapsulated|enumeration|'
- r'equation|exit|expandable|extends|external|firstTick|final|flow|for|if|'
- r'import|impure|in|initial|inner|input|interval|loop|nondiscrete|outer|'
- r'output|parameter|partial|protected|public|pure|redeclare|'
- r'replaceable|return|stream|then|when|while)\b',
- Keyword.Reserved),
- (r'(and|not|or)\b', Operator.Word),
- (r'(block|class|connector|end|function|model|operator|package|'
- r'record|type)\b', Keyword.Reserved, 'class'),
- (r'(false|true)\b', Keyword.Constant),
- (r'within\b', Keyword.Reserved, 'package-prefix'),
- (_name, Name)
- ],
- 'class': [
- include('whitespace'),
- (r'(function|record)\b', Keyword.Reserved),
- (r'(if|for|when|while)\b', Keyword.Reserved, '#pop'),
- (_name, Name.Class, '#pop'),
- default('#pop')
- ],
- 'package-prefix': [
- include('whitespace'),
- (_name, Name.Namespace, '#pop'),
- default('#pop')
- ],
- 'string': [
- (r'"', String.Double, '#pop'),
- (r'\\[\'"?\\abfnrtv]', String.Escape),
- (r'(?i)<\s*html\s*>([^\\"]|\\.)+?(<\s*/\s*html\s*>|(?="))',
- using(HtmlLexer)),
- (r'<|\\?[^"\\<]+', String.Double)
- ]
- }
-
-
-class BugsLexer(RegexLexer):
- """
- Pygments Lexer for `OpenBugs <http://www.openbugs.net/w/FrontPage>`_ and WinBugs
- models.
-
- ..
versionadded:: 1.6 - """ - - name = 'BUGS' - aliases = ['bugs', 'winbugs', 'openbugs'] - filenames = ['*.bug'] - - _FUNCTIONS = ( - # Scalar functions - 'abs', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctanh', - 'cloglog', 'cos', 'cosh', 'cumulative', 'cut', 'density', 'deviance', - 'equals', 'expr', 'gammap', 'ilogit', 'icloglog', 'integral', 'log', - 'logfact', 'loggam', 'logit', 'max', 'min', 'phi', 'post.p.value', - 'pow', 'prior.p.value', 'probit', 'replicate.post', 'replicate.prior', - 'round', 'sin', 'sinh', 'solution', 'sqrt', 'step', 'tan', 'tanh', - 'trunc', - # Vector functions - 'inprod', 'interp.lin', 'inverse', 'logdet', 'mean', 'eigen.vals', - 'ode', 'prod', 'p.valueM', 'rank', 'ranked', 'replicate.postM', - 'sd', 'sort', 'sum', - # Special - 'D', 'I', 'F', 'T', 'C') - """ OpenBUGS built-in functions - - From http://www.openbugs.info/Manuals/ModelSpecification.html#ContentsAII - - This also includes - - - T, C, I : Truncation and censoring. - ``T`` and ``C`` are in OpenBUGS. ``I`` in WinBUGS. - - D : ODE - - F : Functional http://www.openbugs.info/Examples/Functionals.html - - """ - - _DISTRIBUTIONS = ('dbern', 'dbin', 'dcat', 'dnegbin', 'dpois', - 'dhyper', 'dbeta', 'dchisqr', 'ddexp', 'dexp', - 'dflat', 'dgamma', 'dgev', 'df', 'dggamma', 'dgpar', - 'dloglik', 'dlnorm', 'dlogis', 'dnorm', 'dpar', - 'dt', 'dunif', 'dweib', 'dmulti', 'ddirch', 'dmnorm', - 'dmt', 'dwish') - """ OpenBUGS built-in distributions - - Functions from - http://www.openbugs.info/Manuals/ModelSpecification.html#ContentsAI - """ - - tokens = { - 'whitespace': [ - (r"\s+", Text), - ], - 'comments': [ - # Comments - (r'#.*$', Comment.Single), - ], - 'root': [ - # Comments - include('comments'), - include('whitespace'), - # Block start - (r'(model)(\s+)(\{)', - bygroups(Keyword.Namespace, Text, Punctuation)), - # Reserved Words - (r'(for|in)(?![\w.])', Keyword.Reserved), - # Built-in Functions - (r'(%s)(?=\s*\()' - % r'|'.join(_FUNCTIONS + _DISTRIBUTIONS), - Name.Builtin), - # Regular variable names - (r'[A-Za-z][\w.]*', Name), - # Number Literals - (r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', Number), - # Punctuation - (r'\[|\]|\(|\)|:|,|;', Punctuation), - # Assignment operators - # SLexer makes these tokens Operators. - (r'<-|~', Operator), - # Infix and prefix operators - (r'\+|-|\*|/', Operator), - # Block - (r'[{}]', Punctuation), - ] - } - - def analyse_text(text): - if re.search(r"^\s*model\s*{", text, re.M): - return 0.7 - else: - return 0.0 - - -class JagsLexer(RegexLexer): - """ - Pygments Lexer for JAGS. - - .. 
versionadded:: 1.6
- """
-
- name = 'JAGS'
- aliases = ['jags']
- filenames = ['*.jag', '*.bug']
-
- # JAGS
- _FUNCTIONS = (
- 'abs', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctanh',
- 'cos', 'cosh', 'cloglog',
- 'equals', 'exp', 'icloglog', 'ifelse', 'ilogit', 'log', 'logfact',
- 'loggam', 'logit', 'phi', 'pow', 'probit', 'round', 'sin', 'sinh',
- 'sqrt', 'step', 'tan', 'tanh', 'trunc', 'inprod', 'interp.lin',
- 'logdet', 'max', 'mean', 'min', 'prod', 'sum', 'sd', 'inverse',
- 'rank', 'sort', 't', 'acos', 'acosh', 'asin', 'asinh', 'atan',
- # Truncation/Censoring (should I include)
- 'T', 'I')
- # Distributions with density, probability and quartile functions
- _DISTRIBUTIONS = tuple('[dpq]%s' % x for x in
- ('bern', 'beta', 'dchiqsqr', 'ddexp', 'dexp',
- 'df', 'gamma', 'gen.gamma', 'logis', 'lnorm',
- 'negbin', 'nchisqr', 'norm', 'par', 'pois', 'weib'))
- # Other distributions without density and probability
- _OTHER_DISTRIBUTIONS = (
- 'dt', 'dunif', 'dbetabin', 'dbern', 'dbin', 'dcat', 'dhyper',
- 'ddirch', 'dmnorm', 'dwish', 'dmt', 'dmulti', 'dbinom', 'dchisq',
- 'dnbinom', 'dweibull', 'ddirich')
-
- tokens = {
- 'whitespace': [
- (r"\s+", Text),
- ],
- 'names': [
- # Regular variable names
- (r'[a-zA-Z][\w.]*\b', Name),
- ],
- 'comments': [
- # do not use stateful comments
- (r'(?s)/\*.*?\*/', Comment.Multiline),
- # Comments
- (r'#.*$', Comment.Single),
- ],
- 'root': [
- # Comments
- include('comments'),
- include('whitespace'),
- # Block start
- (r'(model|data)(\s+)(\{)',
- bygroups(Keyword.Namespace, Text, Punctuation)),
- (r'var(?![\w.])', Keyword.Declaration),
- # Reserved Words
- (r'(for|in)(?![\w.])', Keyword.Reserved),
- # Builtins
- # Need to use lookahead because . is a valid char
- (r'(%s)(?=\s*\()' % r'|'.join(_FUNCTIONS
- + _DISTRIBUTIONS
- + _OTHER_DISTRIBUTIONS),
- Name.Builtin),
- # Names
- include('names'),
- # Number Literals
- (r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', Number),
- (r'\[|\]|\(|\)|:|,|;', Punctuation),
- # Assignment operators
- (r'<-|~', Operator),
- # # JAGS includes many more than OpenBUGS
- (r'\+|-|\*|\/|\|\|[&]{2}|[<>=]=?|\^|%.*?%', Operator),
- (r'[{}]', Punctuation),
- ]
- }
-
- def analyse_text(text):
- if re.search(r'^\s*model\s*\{', text, re.M):
- if re.search(r'^\s*data\s*\{', text, re.M):
- return 0.9
- elif re.search(r'^\s*var', text, re.M):
- return 0.9
- else:
- return 0.3
- else:
- return 0
-
-
-class StanLexer(RegexLexer):
- """Pygments Lexer for Stan models.
-
- The Stan modeling language is specified in the *Stan Modeling Language
- User's Guide and Reference Manual, v2.17.0*,
- `pdf <https://github.com/stan-dev/stan/releases/download/v2.17.0/stan-reference-2.17.0.pdf>`__.
-
- ..
versionadded:: 1.6
- """
-
- name = 'Stan'
- aliases = ['stan']
- filenames = ['*.stan']
-
- tokens = {
- 'whitespace': [
- (r"\s+", Text),
- ],
- 'comments': [
- (r'(?s)/\*.*?\*/', Comment.Multiline),
- # Comments
- (r'(//|#).*$', Comment.Single),
- ],
- 'root': [
- # Stan is more restrictive on strings than this regex
- (r'"[^"]*"', String),
- # Comments
- include('comments'),
- # block start
- include('whitespace'),
- # Block start
- (r'(%s)(\s*)(\{)' %
- r'|'.join(('functions', 'data', r'transformed\s+?data',
- 'parameters', r'transformed\s+parameters',
- 'model', r'generated\s+quantities')),
- bygroups(Keyword.Namespace, Text, Punctuation)),
- # target keyword
- (r'target\s*\+=', Keyword),
- # Reserved Words
- (r'(%s)\b' % r'|'.join(_stan_builtins.KEYWORDS), Keyword),
- # Truncation
- (r'T(?=\s*\[)', Keyword),
- # Data types
- (r'(%s)\b' % r'|'.join(_stan_builtins.TYPES), Keyword.Type),
- # < should be punctuation, but elsewhere I can't tell if it is in
- # a range constraint
- (r'(<)(\s*)(upper|lower)(\s*)(=)',
- bygroups(Operator, Whitespace, Keyword, Whitespace, Punctuation)),
- (r'(,)(\s*)(upper)(\s*)(=)',
- bygroups(Punctuation, Whitespace, Keyword, Whitespace, Punctuation)),
- # Punctuation
- (r"[;,\[\]()]", Punctuation),
- # Builtin
- (r'(%s)(?=\s*\()' % '|'.join(_stan_builtins.FUNCTIONS), Name.Builtin),
- (r'(~)(\s*)(%s)(?=\s*\()' % '|'.join(_stan_builtins.DISTRIBUTIONS),
- bygroups(Operator, Whitespace, Name.Builtin)),
- # Special names ending in __, like lp__
- (r'[A-Za-z]\w*__\b', Name.Builtin.Pseudo),
- (r'(%s)\b' % r'|'.join(_stan_builtins.RESERVED), Keyword.Reserved),
- # user-defined functions
- (r'[A-Za-z]\w*(?=\s*\()]', Name.Function),
- # Regular variable names
- (r'[A-Za-z]\w*\b', Name),
- # Real Literals
- (r'[0-9]+(\.[0-9]*)?([eE][+-]?[0-9]+)?', Number.Float),
- (r'\.[0-9]+([eE][+-]?[0-9]+)?', Number.Float),
- # Integer Literals
- (r'[0-9]+', Number.Integer),
- # Assignment operators
- (r'<-|(?:\+|-|\.?/|\.?\*|=)?=|~', Operator),
- # Infix, prefix and postfix operators (and = )
- (r"\+|-|\.?\*|\.?/|\\|'|\^|!=?|<=?|>=?|\|\||&&|%|\?|:", Operator),
- # Block delimiters
- (r'[{}]', Punctuation),
- # Distribution |
- (r'\|', Punctuation)
- ]
- }
-
- def analyse_text(text):
- if re.search(r'^\s*parameters\s*\{', text, re.M):
- return 1.0
- else:
- return 0.0
+# -*- coding: utf-8 -*-
+"""
+ pygments.lexers.modeling
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for modeling languages.
+
+ :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pygments.lexer import RegexLexer, include, bygroups, using, default
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Whitespace
+
+from pygments.lexers.html import HtmlLexer
+from pygments.lexers import _stan_builtins
+
+__all__ = ['ModelicaLexer', 'BugsLexer', 'JagsLexer', 'StanLexer']
+
+
+class ModelicaLexer(RegexLexer):
+ """
+ For `Modelica <http://www.modelica.org/>`_ source code.
+
+ ..
versionadded:: 1.1
+ """
+ name = 'Modelica'
+ aliases = ['modelica']
+ filenames = ['*.mo']
+ mimetypes = ['text/x-modelica']
+
+ flags = re.DOTALL | re.MULTILINE
+
+ _name = r"(?:'(?:[^\\']|\\.)+'|[a-zA-Z_]\w*)"
+
+ tokens = {
+ 'whitespace': [
+ (r'[\s\ufeff]+', Text),
+ (r'//[^\n]*\n?', Comment.Single),
+ (r'/\*.*?\*/', Comment.Multiline)
+ ],
+ 'root': [
+ include('whitespace'),
+ (r'"', String.Double, 'string'),
+ (r'[()\[\]{},;]+', Punctuation),
+ (r'\.?[*^/+-]|\.|<>|[<>:=]=?', Operator),
+ (r'\d+(\.?\d*[eE][-+]?\d+|\.\d*)', Number.Float),
+ (r'\d+', Number.Integer),
+ (r'(abs|acos|actualStream|array|asin|assert|AssertionLevel|atan|'
+ r'atan2|backSample|Boolean|cardinality|cat|ceil|change|Clock|'
+ r'Connections|cos|cosh|cross|delay|diagonal|div|edge|exp|'
+ r'ExternalObject|fill|floor|getInstanceName|hold|homotopy|'
+ r'identity|inStream|integer|Integer|interval|inverse|isPresent|'
+ r'linspace|log|log10|matrix|max|min|mod|ndims|noClock|noEvent|'
+ r'ones|outerProduct|pre|previous|product|Real|reinit|rem|rooted|'
+ r'sample|scalar|semiLinear|shiftSample|sign|sin|sinh|size|skew|'
+ r'smooth|spatialDistribution|sqrt|StateSelect|String|subSample|'
+ r'sum|superSample|symmetric|tan|tanh|terminal|terminate|time|'
+ r'transpose|vector|zeros)\b', Name.Builtin),
+ (r'(algorithm|annotation|break|connect|constant|constrainedby|der|'
+ r'discrete|each|else|elseif|elsewhen|encapsulated|enumeration|'
+ r'equation|exit|expandable|extends|external|firstTick|final|flow|for|if|'
+ r'import|impure|in|initial|inner|input|interval|loop|nondiscrete|outer|'
+ r'output|parameter|partial|protected|public|pure|redeclare|'
+ r'replaceable|return|stream|then|when|while)\b',
+ Keyword.Reserved),
+ (r'(and|not|or)\b', Operator.Word),
+ (r'(block|class|connector|end|function|model|operator|package|'
+ r'record|type)\b', Keyword.Reserved, 'class'),
+ (r'(false|true)\b', Keyword.Constant),
+ (r'within\b', Keyword.Reserved, 'package-prefix'),
+ (_name, Name)
+ ],
+ 'class': [
+ include('whitespace'),
+ (r'(function|record)\b', Keyword.Reserved),
+ (r'(if|for|when|while)\b', Keyword.Reserved, '#pop'),
+ (_name, Name.Class, '#pop'),
+ default('#pop')
+ ],
+ 'package-prefix': [
+ include('whitespace'),
+ (_name, Name.Namespace, '#pop'),
+ default('#pop')
+ ],
+ 'string': [
+ (r'"', String.Double, '#pop'),
+ (r'\\[\'"?\\abfnrtv]', String.Escape),
+ (r'(?i)<\s*html\s*>([^\\"]|\\.)+?(<\s*/\s*html\s*>|(?="))',
+ using(HtmlLexer)),
+ (r'<|\\?[^"\\<]+', String.Double)
+ ]
+ }
+
+
+class BugsLexer(RegexLexer):
+ """
+ Pygments Lexer for `OpenBugs <http://www.openbugs.net/w/FrontPage>`_ and WinBugs
+ models.
+
+ ..
versionadded:: 1.6 + """ + + name = 'BUGS' + aliases = ['bugs', 'winbugs', 'openbugs'] + filenames = ['*.bug'] + + _FUNCTIONS = ( + # Scalar functions + 'abs', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctanh', + 'cloglog', 'cos', 'cosh', 'cumulative', 'cut', 'density', 'deviance', + 'equals', 'expr', 'gammap', 'ilogit', 'icloglog', 'integral', 'log', + 'logfact', 'loggam', 'logit', 'max', 'min', 'phi', 'post.p.value', + 'pow', 'prior.p.value', 'probit', 'replicate.post', 'replicate.prior', + 'round', 'sin', 'sinh', 'solution', 'sqrt', 'step', 'tan', 'tanh', + 'trunc', + # Vector functions + 'inprod', 'interp.lin', 'inverse', 'logdet', 'mean', 'eigen.vals', + 'ode', 'prod', 'p.valueM', 'rank', 'ranked', 'replicate.postM', + 'sd', 'sort', 'sum', + # Special + 'D', 'I', 'F', 'T', 'C') + """ OpenBUGS built-in functions + + From http://www.openbugs.info/Manuals/ModelSpecification.html#ContentsAII + + This also includes + + - T, C, I : Truncation and censoring. + ``T`` and ``C`` are in OpenBUGS. ``I`` in WinBUGS. + - D : ODE + - F : Functional http://www.openbugs.info/Examples/Functionals.html + + """ + + _DISTRIBUTIONS = ('dbern', 'dbin', 'dcat', 'dnegbin', 'dpois', + 'dhyper', 'dbeta', 'dchisqr', 'ddexp', 'dexp', + 'dflat', 'dgamma', 'dgev', 'df', 'dggamma', 'dgpar', + 'dloglik', 'dlnorm', 'dlogis', 'dnorm', 'dpar', + 'dt', 'dunif', 'dweib', 'dmulti', 'ddirch', 'dmnorm', + 'dmt', 'dwish') + """ OpenBUGS built-in distributions + + Functions from + http://www.openbugs.info/Manuals/ModelSpecification.html#ContentsAI + """ + + tokens = { + 'whitespace': [ + (r"\s+", Text), + ], + 'comments': [ + # Comments + (r'#.*$', Comment.Single), + ], + 'root': [ + # Comments + include('comments'), + include('whitespace'), + # Block start + (r'(model)(\s+)(\{)', + bygroups(Keyword.Namespace, Text, Punctuation)), + # Reserved Words + (r'(for|in)(?![\w.])', Keyword.Reserved), + # Built-in Functions + (r'(%s)(?=\s*\()' + % r'|'.join(_FUNCTIONS + _DISTRIBUTIONS), + Name.Builtin), + # Regular variable names + (r'[A-Za-z][\w.]*', Name), + # Number Literals + (r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', Number), + # Punctuation + (r'\[|\]|\(|\)|:|,|;', Punctuation), + # Assignment operators + # SLexer makes these tokens Operators. + (r'<-|~', Operator), + # Infix and prefix operators + (r'\+|-|\*|/', Operator), + # Block + (r'[{}]', Punctuation), + ] + } + + def analyse_text(text): + if re.search(r"^\s*model\s*{", text, re.M): + return 0.7 + else: + return 0.0 + + +class JagsLexer(RegexLexer): + """ + Pygments Lexer for JAGS. + + .. 
versionadded:: 1.6 + """ + + name = 'JAGS' + aliases = ['jags'] + filenames = ['*.jag', '*.bug'] + + # JAGS + _FUNCTIONS = ( + 'abs', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctanh', + 'cos', 'cosh', 'cloglog', + 'equals', 'exp', 'icloglog', 'ifelse', 'ilogit', 'log', 'logfact', + 'loggam', 'logit', 'phi', 'pow', 'probit', 'round', 'sin', 'sinh', + 'sqrt', 'step', 'tan', 'tanh', 'trunc', 'inprod', 'interp.lin', + 'logdet', 'max', 'mean', 'min', 'prod', 'sum', 'sd', 'inverse', + 'rank', 'sort', 't', 'acos', 'acosh', 'asin', 'asinh', 'atan', + # Truncation/Censoring (should I include) + 'T', 'I') + # Distributions with density, probability and quartile functions + _DISTRIBUTIONS = tuple('[dpq]%s' % x for x in + ('bern', 'beta', 'dchiqsqr', 'ddexp', 'dexp', + 'df', 'gamma', 'gen.gamma', 'logis', 'lnorm', + 'negbin', 'nchisqr', 'norm', 'par', 'pois', 'weib')) + # Other distributions without density and probability + _OTHER_DISTRIBUTIONS = ( + 'dt', 'dunif', 'dbetabin', 'dbern', 'dbin', 'dcat', 'dhyper', + 'ddirch', 'dmnorm', 'dwish', 'dmt', 'dmulti', 'dbinom', 'dchisq', + 'dnbinom', 'dweibull', 'ddirich') + + tokens = { + 'whitespace': [ + (r"\s+", Text), + ], + 'names': [ + # Regular variable names + (r'[a-zA-Z][\w.]*\b', Name), + ], + 'comments': [ + # do not use stateful comments + (r'(?s)/\*.*?\*/', Comment.Multiline), + # Comments + (r'#.*$', Comment.Single), + ], + 'root': [ + # Comments + include('comments'), + include('whitespace'), + # Block start + (r'(model|data)(\s+)(\{)', + bygroups(Keyword.Namespace, Text, Punctuation)), + (r'var(?![\w.])', Keyword.Declaration), + # Reserved Words + (r'(for|in)(?![\w.])', Keyword.Reserved), + # Builtins + # Need to use lookahead because . is a valid char + (r'(%s)(?=\s*\()' % r'|'.join(_FUNCTIONS + + _DISTRIBUTIONS + + _OTHER_DISTRIBUTIONS), + Name.Builtin), + # Names + include('names'), + # Number Literals + (r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', Number), + (r'\[|\]|\(|\)|:|,|;', Punctuation), + # Assignment operators + (r'<-|~', Operator), + # # JAGS includes many more than OpenBUGS + (r'\+|-|\*|\/|\|\|[&]{2}|[<>=]=?|\^|%.*?%', Operator), + (r'[{}]', Punctuation), + ] + } + + def analyse_text(text): + if re.search(r'^\s*model\s*\{', text, re.M): + if re.search(r'^\s*data\s*\{', text, re.M): + return 0.9 + elif re.search(r'^\s*var', text, re.M): + return 0.9 + else: + return 0.3 + else: + return 0 + + +class StanLexer(RegexLexer): + """Pygments Lexer for Stan models. + + The Stan modeling language is specified in the *Stan Modeling Language + User's Guide and Reference Manual, v2.17.0*, + `pdf `__. + + .. 
versionadded:: 1.6 + """ + + name = 'Stan' + aliases = ['stan'] + filenames = ['*.stan'] + + tokens = { + 'whitespace': [ + (r"\s+", Text), + ], + 'comments': [ + (r'(?s)/\*.*?\*/', Comment.Multiline), + # Comments + (r'(//|#).*$', Comment.Single), + ], + 'root': [ + # Stan is more restrictive on strings than this regex + (r'"[^"]*"', String), + # Comments + include('comments'), + # block start + include('whitespace'), + # Block start + (r'(%s)(\s*)(\{)' % + r'|'.join(('functions', 'data', r'transformed\s+?data', + 'parameters', r'transformed\s+parameters', + 'model', r'generated\s+quantities')), + bygroups(Keyword.Namespace, Text, Punctuation)), + # target keyword + (r'target\s*\+=', Keyword), + # Reserved Words + (r'(%s)\b' % r'|'.join(_stan_builtins.KEYWORDS), Keyword), + # Truncation + (r'T(?=\s*\[)', Keyword), + # Data types + (r'(%s)\b' % r'|'.join(_stan_builtins.TYPES), Keyword.Type), + # < should be punctuation, but elsewhere I can't tell if it is in + # a range constraint + (r'(<)(\s*)(upper|lower)(\s*)(=)', + bygroups(Operator, Whitespace, Keyword, Whitespace, Punctuation)), + (r'(,)(\s*)(upper)(\s*)(=)', + bygroups(Punctuation, Whitespace, Keyword, Whitespace, Punctuation)), + # Punctuation + (r"[;,\[\]()]", Punctuation), + # Builtin + (r'(%s)(?=\s*\()' % '|'.join(_stan_builtins.FUNCTIONS), Name.Builtin), + (r'(~)(\s*)(%s)(?=\s*\()' % '|'.join(_stan_builtins.DISTRIBUTIONS), + bygroups(Operator, Whitespace, Name.Builtin)), + # Special names ending in __, like lp__ + (r'[A-Za-z]\w*__\b', Name.Builtin.Pseudo), + (r'(%s)\b' % r'|'.join(_stan_builtins.RESERVED), Keyword.Reserved), + # user-defined functions + (r'[A-Za-z]\w*(?=\s*\()]', Name.Function), + # Regular variable names + (r'[A-Za-z]\w*\b', Name), + # Real Literals + (r'[0-9]+(\.[0-9]*)?([eE][+-]?[0-9]+)?', Number.Float), + (r'\.[0-9]+([eE][+-]?[0-9]+)?', Number.Float), + # Integer Literals + (r'[0-9]+', Number.Integer), + # Assignment operators + (r'<-|(?:\+|-|\.?/|\.?\*|=)?=|~', Operator), + # Infix, prefix and postfix operators (and = ) + (r"\+|-|\.?\*|\.?/|\\|'|\^|!=?|<=?|>=?|\|\||&&|%|\?|:", Operator), + # Block delimiters + (r'[{}]', Punctuation), + # Distribution | + (r'\|', Punctuation) + ] + } + + def analyse_text(text): + if re.search(r'^\s*parameters\s*\{', text, re.M): + return 1.0 + else: + return 0.0 diff --git a/pygments/lexers/modula2.py b/pygments/lexers/modula2.py old mode 100644 new mode 100755 index 4fd84da..da5db48 --- a/pygments/lexers/modula2.py +++ b/pygments/lexers/modula2.py @@ -1,1561 +1,1561 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.modula2 - ~~~~~~~~~~~~~~~~~~~~~~~ - - Multi-Dialect Lexer for Modula-2. - - :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -import re - -from pygments.lexer import RegexLexer, include -from pygments.util import get_bool_opt, get_list_opt -from pygments.token import Text, Comment, Operator, Keyword, Name, \ - String, Number, Punctuation, Error - -__all__ = ['Modula2Lexer'] - - -# Multi-Dialect Modula-2 Lexer -class Modula2Lexer(RegexLexer): - """ - For `Modula-2 `_ source code. - - The Modula-2 lexer supports several dialects. By default, it operates in - fallback mode, recognising the *combined* literals, punctuation symbols - and operators of all supported dialects, and the *combined* reserved words - and builtins of PIM Modula-2, ISO Modula-2 and Modula-2 R10, while not - differentiating between library defined identifiers. 
- - To select a specific dialect, a dialect option may be passed - or a dialect tag may be embedded into a source file. - - Dialect Options: - - `m2pim` - Select PIM Modula-2 dialect. - `m2iso` - Select ISO Modula-2 dialect. - `m2r10` - Select Modula-2 R10 dialect. - `objm2` - Select Objective Modula-2 dialect. - - The PIM and ISO dialect options may be qualified with a language extension. - - Language Extensions: - - `+aglet` - Select Aglet Modula-2 extensions, available with m2iso. - `+gm2` - Select GNU Modula-2 extensions, available with m2pim. - `+p1` - Select p1 Modula-2 extensions, available with m2iso. - `+xds` - Select XDS Modula-2 extensions, available with m2iso. - - - Passing a Dialect Option via Unix Commandline Interface - - Dialect options may be passed to the lexer using the `dialect` key. - Only one such option should be passed. If multiple dialect options are - passed, the first valid option is used, any subsequent options are ignored. - - Examples: - - `$ pygmentize -O full,dialect=m2iso -f html -o /path/to/output /path/to/input` - Use ISO dialect to render input to HTML output - `$ pygmentize -O full,dialect=m2iso+p1 -f rtf -o /path/to/output /path/to/input` - Use ISO dialect with p1 extensions to render input to RTF output - - - Embedding a Dialect Option within a source file - - A dialect option may be embedded in a source file in form of a dialect - tag, a specially formatted comment that specifies a dialect option. - - Dialect Tag EBNF:: - - dialectTag : - OpeningCommentDelim Prefix dialectOption ClosingCommentDelim ; - - dialectOption : - 'm2pim' | 'm2iso' | 'm2r10' | 'objm2' | - 'm2iso+aglet' | 'm2pim+gm2' | 'm2iso+p1' | 'm2iso+xds' ; - - Prefix : '!' ; - - OpeningCommentDelim : '(*' ; - - ClosingCommentDelim : '*)' ; - - No whitespace is permitted between the tokens of a dialect tag. - - In the event that a source file contains multiple dialect tags, the first - tag that contains a valid dialect option will be used and any subsequent - dialect tags will be ignored. Ideally, a dialect tag should be placed - at the beginning of a source file. - - An embedded dialect tag overrides a dialect option set via command line. - - Examples: - - ``(*!m2r10*) DEFINITION MODULE Foobar; ...`` - Use Modula2 R10 dialect to render this source file. - ``(*!m2pim+gm2*) DEFINITION MODULE Bazbam; ...`` - Use PIM dialect with GNU extensions to render this source file. - - - Algol Publication Mode: - - In Algol publication mode, source text is rendered for publication of - algorithms in scientific papers and academic texts, following the format - of the Revised Algol-60 Language Report. It is activated by passing - one of two corresponding styles as an option: - - `algol` - render reserved words lowercase underline boldface - and builtins lowercase boldface italic - `algol_nu` - render reserved words lowercase boldface (no underlining) - and builtins lowercase boldface italic - - The lexer automatically performs the required lowercase conversion when - this mode is activated. - - Example: - - ``$ pygmentize -O full,style=algol -f latex -o /path/to/output /path/to/input`` - Render input file in Algol publication mode to LaTeX output. - - - Rendering Mode of First Class ADT Identifiers: - - The rendering of standard library first class ADT identifiers is controlled - by option flag "treat_stdlib_adts_as_builtins". - - When this option is turned on, standard library ADT identifiers are rendered - as builtins. When it is turned off, they are rendered as ordinary library - identifiers. 
- - `treat_stdlib_adts_as_builtins` (default: On) - - The option is useful for dialects that support ADTs as first class objects - and provide ADTs in the standard library that would otherwise be built-in. - - At present, only Modula-2 R10 supports library ADTs as first class objects - and therefore, no ADT identifiers are defined for any other dialects. - - Example: - - ``$ pygmentize -O full,dialect=m2r10,treat_stdlib_adts_as_builtins=Off ...`` - Render standard library ADTs as ordinary library types. - - .. versionadded:: 1.3 - - .. versionchanged:: 2.1 - Added multi-dialect support. - """ - name = 'Modula-2' - aliases = ['modula2', 'm2'] - filenames = ['*.def', '*.mod'] - mimetypes = ['text/x-modula2'] - - flags = re.MULTILINE | re.DOTALL - - tokens = { - 'whitespace': [ - (r'\n+', Text), # blank lines - (r'\s+', Text), # whitespace - ], - 'dialecttags': [ - # PIM Dialect Tag - (r'\(\*!m2pim\*\)', Comment.Special), - # ISO Dialect Tag - (r'\(\*!m2iso\*\)', Comment.Special), - # M2R10 Dialect Tag - (r'\(\*!m2r10\*\)', Comment.Special), - # ObjM2 Dialect Tag - (r'\(\*!objm2\*\)', Comment.Special), - # Aglet Extensions Dialect Tag - (r'\(\*!m2iso\+aglet\*\)', Comment.Special), - # GNU Extensions Dialect Tag - (r'\(\*!m2pim\+gm2\*\)', Comment.Special), - # p1 Extensions Dialect Tag - (r'\(\*!m2iso\+p1\*\)', Comment.Special), - # XDS Extensions Dialect Tag - (r'\(\*!m2iso\+xds\*\)', Comment.Special), - ], - 'identifiers': [ - (r'([a-zA-Z_$][\w$]*)', Name), - ], - 'prefixed_number_literals': [ - # - # Base-2, whole number - (r'0b[01]+(\'[01]+)*', Number.Bin), - # - # Base-16, whole number - (r'0[ux][0-9A-F]+(\'[0-9A-F]+)*', Number.Hex), - ], - 'plain_number_literals': [ - # - # Base-10, real number with exponent - (r'[0-9]+(\'[0-9]+)*' # integral part - r'\.[0-9]+(\'[0-9]+)*' # fractional part - r'[eE][+-]?[0-9]+(\'[0-9]+)*', # exponent - Number.Float), - # - # Base-10, real number without exponent - (r'[0-9]+(\'[0-9]+)*' # integral part - r'\.[0-9]+(\'[0-9]+)*', # fractional part - Number.Float), - # - # Base-10, whole number - (r'[0-9]+(\'[0-9]+)*', Number.Integer), - ], - 'suffixed_number_literals': [ - # - # Base-8, whole number - (r'[0-7]+B', Number.Oct), - # - # Base-8, character code - (r'[0-7]+C', Number.Oct), - # - # Base-16, number - (r'[0-9A-F]+H', Number.Hex), - ], - 'string_literals': [ - (r"'(\\\\|\\'|[^'])*'", String), # single quoted string - (r'"(\\\\|\\"|[^"])*"', String), # double quoted string - ], - 'digraph_operators': [ - # Dot Product Operator - (r'\*\.', Operator), - # Array Concatenation Operator - (r'\+>', Operator), # M2R10 + ObjM2 - # Inequality Operator - (r'<>', Operator), # ISO + PIM - # Less-Or-Equal, Subset - (r'<=', Operator), - # Greater-Or-Equal, Superset - (r'>=', Operator), - # Identity Operator - (r'==', Operator), # M2R10 + ObjM2 - # Type Conversion Operator - (r'::', Operator), # M2R10 + ObjM2 - # Assignment Symbol - (r':=', Operator), - # Postfix Increment Mutator - (r'\+\+', Operator), # M2R10 + ObjM2 - # Postfix Decrement Mutator - (r'--', Operator), # M2R10 + ObjM2 - ], - 'unigraph_operators': [ - # Arithmetic Operators - (r'[+-]', Operator), - (r'[*/]', Operator), - # ISO 80000-2 compliant Set Difference Operator - (r'\\', Operator), # M2R10 + ObjM2 - # Relational Operators - (r'[=#<>]', Operator), - # Dereferencing Operator - (r'\^', Operator), - # Dereferencing Operator Synonym - (r'@', Operator), # ISO - # Logical AND Operator Synonym - (r'&', Operator), # PIM + ISO - # Logical NOT Operator Synonym - (r'~', Operator), # PIM + ISO - # 
Smalltalk Message Prefix - (r'`', Operator), # ObjM2 - ], - 'digraph_punctuation': [ - # Range Constructor - (r'\.\.', Punctuation), - # Opening Chevron Bracket - (r'<<', Punctuation), # M2R10 + ISO - # Closing Chevron Bracket - (r'>>', Punctuation), # M2R10 + ISO - # Blueprint Punctuation - (r'->', Punctuation), # M2R10 + ISO - # Distinguish |# and # in M2 R10 - (r'\|#', Punctuation), - # Distinguish ## and # in M2 R10 - (r'##', Punctuation), - # Distinguish |* and * in M2 R10 - (r'\|\*', Punctuation), - ], - 'unigraph_punctuation': [ - # Common Punctuation - (r'[()\[\]{},.:;|]', Punctuation), - # Case Label Separator Synonym - (r'!', Punctuation), # ISO - # Blueprint Punctuation - (r'\?', Punctuation), # M2R10 + ObjM2 - ], - 'comments': [ - # Single Line Comment - (r'^//.*?\n', Comment.Single), # M2R10 + ObjM2 - # Block Comment - (r'\(\*([^$].*?)\*\)', Comment.Multiline), - # Template Block Comment - (r'/\*(.*?)\*/', Comment.Multiline), # M2R10 + ObjM2 - ], - 'pragmas': [ - # ISO Style Pragmas - (r'<\*.*?\*>', Comment.Preproc), # ISO, M2R10 + ObjM2 - # Pascal Style Pragmas - (r'\(\*\$.*?\*\)', Comment.Preproc), # PIM - ], - 'root': [ - include('whitespace'), - include('dialecttags'), - include('pragmas'), - include('comments'), - include('identifiers'), - include('suffixed_number_literals'), # PIM + ISO - include('prefixed_number_literals'), # M2R10 + ObjM2 - include('plain_number_literals'), - include('string_literals'), - include('digraph_punctuation'), - include('digraph_operators'), - include('unigraph_punctuation'), - include('unigraph_operators'), - ] - } - -# C o m m o n D a t a s e t s - - # Common Reserved Words Dataset - common_reserved_words = ( - # 37 common reserved words - 'AND', 'ARRAY', 'BEGIN', 'BY', 'CASE', 'CONST', 'DEFINITION', 'DIV', - 'DO', 'ELSE', 'ELSIF', 'END', 'EXIT', 'FOR', 'FROM', 'IF', - 'IMPLEMENTATION', 'IMPORT', 'IN', 'LOOP', 'MOD', 'MODULE', 'NOT', - 'OF', 'OR', 'POINTER', 'PROCEDURE', 'RECORD', 'REPEAT', 'RETURN', - 'SET', 'THEN', 'TO', 'TYPE', 'UNTIL', 'VAR', 'WHILE', - ) - - # Common Builtins Dataset - common_builtins = ( - # 16 common builtins - 'ABS', 'BOOLEAN', 'CARDINAL', 'CHAR', 'CHR', 'FALSE', 'INTEGER', - 'LONGINT', 'LONGREAL', 'MAX', 'MIN', 'NIL', 'ODD', 'ORD', 'REAL', - 'TRUE', - ) - - # Common Pseudo-Module Builtins Dataset - common_pseudo_builtins = ( - # 4 common pseudo builtins - 'ADDRESS', 'BYTE', 'WORD', 'ADR' - ) - -# P I M M o d u l a - 2 D a t a s e t s - - # Lexemes to Mark as Error Tokens for PIM Modula-2 - pim_lexemes_to_reject = ( - '!', '`', '@', '$', '%', '?', '\\', '==', '++', '--', '::', '*.', - '+>', '->', '<<', '>>', '|#', '##', - ) - - # PIM Modula-2 Additional Reserved Words Dataset - pim_additional_reserved_words = ( - # 3 additional reserved words - 'EXPORT', 'QUALIFIED', 'WITH', - ) - - # PIM Modula-2 Additional Builtins Dataset - pim_additional_builtins = ( - # 16 additional builtins - 'BITSET', 'CAP', 'DEC', 'DISPOSE', 'EXCL', 'FLOAT', 'HALT', 'HIGH', - 'INC', 'INCL', 'NEW', 'NIL', 'PROC', 'SIZE', 'TRUNC', 'VAL', - ) - - # PIM Modula-2 Additional Pseudo-Module Builtins Dataset - pim_additional_pseudo_builtins = ( - # 5 additional pseudo builtins - 'SYSTEM', 'PROCESS', 'TSIZE', 'NEWPROCESS', 'TRANSFER', - ) - -# I S O M o d u l a - 2 D a t a s e t s - - # Lexemes to Mark as Error Tokens for ISO Modula-2 - iso_lexemes_to_reject = ( - '`', '$', '%', '?', '\\', '==', '++', '--', '::', '*.', '+>', '->', - '<<', '>>', '|#', '##', - ) - - # ISO Modula-2 Additional Reserved Words Dataset - iso_additional_reserved_words = ( - 
# 9 additional reserved words (ISO 10514-1) - 'EXCEPT', 'EXPORT', 'FINALLY', 'FORWARD', 'PACKEDSET', 'QUALIFIED', - 'REM', 'RETRY', 'WITH', - # 10 additional reserved words (ISO 10514-2 & ISO 10514-3) - 'ABSTRACT', 'AS', 'CLASS', 'GUARD', 'INHERIT', 'OVERRIDE', 'READONLY', - 'REVEAL', 'TRACED', 'UNSAFEGUARDED', - ) - - # ISO Modula-2 Additional Builtins Dataset - iso_additional_builtins = ( - # 26 additional builtins (ISO 10514-1) - 'BITSET', 'CAP', 'CMPLX', 'COMPLEX', 'DEC', 'DISPOSE', 'EXCL', 'FLOAT', - 'HALT', 'HIGH', 'IM', 'INC', 'INCL', 'INT', 'INTERRUPTIBLE', 'LENGTH', - 'LFLOAT', 'LONGCOMPLEX', 'NEW', 'PROC', 'PROTECTION', 'RE', 'SIZE', - 'TRUNC', 'UNINTERRUBTIBLE', 'VAL', - # 5 additional builtins (ISO 10514-2 & ISO 10514-3) - 'CREATE', 'DESTROY', 'EMPTY', 'ISMEMBER', 'SELF', - ) - - # ISO Modula-2 Additional Pseudo-Module Builtins Dataset - iso_additional_pseudo_builtins = ( - # 14 additional builtins (SYSTEM) - 'SYSTEM', 'BITSPERLOC', 'LOCSPERBYTE', 'LOCSPERWORD', 'LOC', - 'ADDADR', 'SUBADR', 'DIFADR', 'MAKEADR', 'ADR', - 'ROTATE', 'SHIFT', 'CAST', 'TSIZE', - # 13 additional builtins (COROUTINES) - 'COROUTINES', 'ATTACH', 'COROUTINE', 'CURRENT', 'DETACH', 'HANDLER', - 'INTERRUPTSOURCE', 'IOTRANSFER', 'IsATTACHED', 'LISTEN', - 'NEWCOROUTINE', 'PROT', 'TRANSFER', - # 9 additional builtins (EXCEPTIONS) - 'EXCEPTIONS', 'AllocateSource', 'CurrentNumber', 'ExceptionNumber', - 'ExceptionSource', 'GetMessage', 'IsCurrentSource', - 'IsExceptionalExecution', 'RAISE', - # 3 additional builtins (TERMINATION) - 'TERMINATION', 'IsTerminating', 'HasHalted', - # 4 additional builtins (M2EXCEPTION) - 'M2EXCEPTION', 'M2Exceptions', 'M2Exception', 'IsM2Exception', - 'indexException', 'rangeException', 'caseSelectException', - 'invalidLocation', 'functionException', 'wholeValueException', - 'wholeDivException', 'realValueException', 'realDivException', - 'complexValueException', 'complexDivException', 'protException', - 'sysException', 'coException', 'exException', - ) - -# M o d u l a - 2 R 1 0 D a t a s e t s - - # Lexemes to Mark as Error Tokens for Modula-2 R10 - m2r10_lexemes_to_reject = ( - '!', '`', '@', '$', '%', '&', '<>', - ) - - # Modula-2 R10 reserved words in addition to the common set - m2r10_additional_reserved_words = ( - # 12 additional reserved words - 'ALIAS', 'ARGLIST', 'BLUEPRINT', 'COPY', 'GENLIB', 'INDETERMINATE', - 'NEW', 'NONE', 'OPAQUE', 'REFERENTIAL', 'RELEASE', 'RETAIN', - # 2 additional reserved words with symbolic assembly option - 'ASM', 'REG', - ) - - # Modula-2 R10 builtins in addition to the common set - m2r10_additional_builtins = ( - # 26 additional builtins - 'CARDINAL', 'COUNT', 'EMPTY', 'EXISTS', 'INSERT', 'LENGTH', 'LONGCARD', - 'OCTET', 'PTR', 'PRED', 'READ', 'READNEW', 'REMOVE', 'RETRIEVE', 'SORT', - 'STORE', 'SUBSET', 'SUCC', 'TLIMIT', 'TMAX', 'TMIN', 'TRUE', 'TSIZE', - 'UNICHAR', 'WRITE', 'WRITEF', - ) - - # Modula-2 R10 Additional Pseudo-Module Builtins Dataset - m2r10_additional_pseudo_builtins = ( - # 13 additional builtins (TPROPERTIES) - 'TPROPERTIES', 'PROPERTY', 'LITERAL', 'TPROPERTY', 'TLITERAL', - 'TBUILTIN', 'TDYN', 'TREFC', 'TNIL', 'TBASE', 'TPRECISION', - 'TMAXEXP', 'TMINEXP', - # 4 additional builtins (CONVERSION) - 'CONVERSION', 'TSXFSIZE', 'SXF', 'VAL', - # 35 additional builtins (UNSAFE) - 'UNSAFE', 'CAST', 'INTRINSIC', 'AVAIL', 'ADD', 'SUB', 'ADDC', 'SUBC', - 'FETCHADD', 'FETCHSUB', 'SHL', 'SHR', 'ASHR', 'ROTL', 'ROTR', 'ROTLC', - 'ROTRC', 'BWNOT', 'BWAND', 'BWOR', 'BWXOR', 'BWNAND', 'BWNOR', - 'SETBIT', 'TESTBIT', 'LSBIT', 'MSBIT', 
'CSBITS', 'BAIL', 'HALT', - 'TODO', 'FFI', 'ADDR', 'VARGLIST', 'VARGC', - # 11 additional builtins (ATOMIC) - 'ATOMIC', 'INTRINSIC', 'AVAIL', 'SWAP', 'CAS', 'INC', 'DEC', 'BWAND', - 'BWNAND', 'BWOR', 'BWXOR', - # 7 additional builtins (COMPILER) - 'COMPILER', 'DEBUG', 'MODNAME', 'PROCNAME', 'LINENUM', 'DEFAULT', - 'HASH', - # 5 additional builtins (ASSEMBLER) - 'ASSEMBLER', 'REGISTER', 'SETREG', 'GETREG', 'CODE', - ) - -# O b j e c t i v e M o d u l a - 2 D a t a s e t s - - # Lexemes to Mark as Error Tokens for Objective Modula-2 - objm2_lexemes_to_reject = ( - '!', '$', '%', '&', '<>', - ) - - # Objective Modula-2 Extensions - # reserved words in addition to Modula-2 R10 - objm2_additional_reserved_words = ( - # 16 additional reserved words - 'BYCOPY', 'BYREF', 'CLASS', 'CONTINUE', 'CRITICAL', 'INOUT', 'METHOD', - 'ON', 'OPTIONAL', 'OUT', 'PRIVATE', 'PROTECTED', 'PROTOCOL', 'PUBLIC', - 'SUPER', 'TRY', - ) - - # Objective Modula-2 Extensions - # builtins in addition to Modula-2 R10 - objm2_additional_builtins = ( - # 3 additional builtins - 'OBJECT', 'NO', 'YES', - ) - - # Objective Modula-2 Extensions - # pseudo-module builtins in addition to Modula-2 R10 - objm2_additional_pseudo_builtins = ( - # None - ) - -# A g l e t M o d u l a - 2 D a t a s e t s - - # Aglet Extensions - # reserved words in addition to ISO Modula-2 - aglet_additional_reserved_words = ( - # None - ) - - # Aglet Extensions - # builtins in addition to ISO Modula-2 - aglet_additional_builtins = ( - # 9 additional builtins - 'BITSET8', 'BITSET16', 'BITSET32', 'CARDINAL8', 'CARDINAL16', - 'CARDINAL32', 'INTEGER8', 'INTEGER16', 'INTEGER32', - ) - - # Aglet Modula-2 Extensions - # pseudo-module builtins in addition to ISO Modula-2 - aglet_additional_pseudo_builtins = ( - # None - ) - -# G N U M o d u l a - 2 D a t a s e t s - - # GNU Extensions - # reserved words in addition to PIM Modula-2 - gm2_additional_reserved_words = ( - # 10 additional reserved words - 'ASM', '__ATTRIBUTE__', '__BUILTIN__', '__COLUMN__', '__DATE__', - '__FILE__', '__FUNCTION__', '__LINE__', '__MODULE__', 'VOLATILE', - ) - - # GNU Extensions - # builtins in addition to PIM Modula-2 - gm2_additional_builtins = ( - # 21 additional builtins - 'BITSET8', 'BITSET16', 'BITSET32', 'CARDINAL8', 'CARDINAL16', - 'CARDINAL32', 'CARDINAL64', 'COMPLEX32', 'COMPLEX64', 'COMPLEX96', - 'COMPLEX128', 'INTEGER8', 'INTEGER16', 'INTEGER32', 'INTEGER64', - 'REAL8', 'REAL16', 'REAL32', 'REAL96', 'REAL128', 'THROW', - ) - - # GNU Extensions - # pseudo-module builtins in addition to PIM Modula-2 - gm2_additional_pseudo_builtins = ( - # None - ) - -# p 1 M o d u l a - 2 D a t a s e t s - - # p1 Extensions - # reserved words in addition to ISO Modula-2 - p1_additional_reserved_words = ( - # None - ) - - # p1 Extensions - # builtins in addition to ISO Modula-2 - p1_additional_builtins = ( - # None - ) - - # p1 Modula-2 Extensions - # pseudo-module builtins in addition to ISO Modula-2 - p1_additional_pseudo_builtins = ( - # 1 additional builtin - 'BCD', - ) - -# X D S M o d u l a - 2 D a t a s e t s - - # XDS Extensions - # reserved words in addition to ISO Modula-2 - xds_additional_reserved_words = ( - # 1 additional reserved word - 'SEQ', - ) - - # XDS Extensions - # builtins in addition to ISO Modula-2 - xds_additional_builtins = ( - # 9 additional builtins - 'ASH', 'ASSERT', 'DIFFADR_TYPE', 'ENTIER', 'INDEX', 'LEN', - 'LONGCARD', 'SHORTCARD', 'SHORTINT', - ) - - # XDS Modula-2 Extensions - # pseudo-module builtins in addition to ISO Modula-2 - 
xds_additional_pseudo_builtins = ( - # 22 additional builtins (SYSTEM) - 'PROCESS', 'NEWPROCESS', 'BOOL8', 'BOOL16', 'BOOL32', 'CARD8', - 'CARD16', 'CARD32', 'INT8', 'INT16', 'INT32', 'REF', 'MOVE', - 'FILL', 'GET', 'PUT', 'CC', 'int', 'unsigned', 'size_t', 'void' - # 3 additional builtins (COMPILER) - 'COMPILER', 'OPTION', 'EQUATION' - ) - -# P I M S t a n d a r d L i b r a r y D a t a s e t s - - # PIM Modula-2 Standard Library Modules Dataset - pim_stdlib_module_identifiers = ( - 'Terminal', 'FileSystem', 'InOut', 'RealInOut', 'MathLib0', 'Storage', - ) - - # PIM Modula-2 Standard Library Types Dataset - pim_stdlib_type_identifiers = ( - 'Flag', 'FlagSet', 'Response', 'Command', 'Lock', 'Permission', - 'MediumType', 'File', 'FileProc', 'DirectoryProc', 'FileCommand', - 'DirectoryCommand', - ) - - # PIM Modula-2 Standard Library Procedures Dataset - pim_stdlib_proc_identifiers = ( - 'Read', 'BusyRead', 'ReadAgain', 'Write', 'WriteString', 'WriteLn', - 'Create', 'Lookup', 'Close', 'Delete', 'Rename', 'SetRead', 'SetWrite', - 'SetModify', 'SetOpen', 'Doio', 'SetPos', 'GetPos', 'Length', 'Reset', - 'Again', 'ReadWord', 'WriteWord', 'ReadChar', 'WriteChar', - 'CreateMedium', 'DeleteMedium', 'AssignName', 'DeassignName', - 'ReadMedium', 'LookupMedium', 'OpenInput', 'OpenOutput', 'CloseInput', - 'CloseOutput', 'ReadString', 'ReadInt', 'ReadCard', 'ReadWrd', - 'WriteInt', 'WriteCard', 'WriteOct', 'WriteHex', 'WriteWrd', - 'ReadReal', 'WriteReal', 'WriteFixPt', 'WriteRealOct', 'sqrt', 'exp', - 'ln', 'sin', 'cos', 'arctan', 'entier', 'ALLOCATE', 'DEALLOCATE', - ) - - # PIM Modula-2 Standard Library Variables Dataset - pim_stdlib_var_identifiers = ( - 'Done', 'termCH', 'in', 'out' - ) - - # PIM Modula-2 Standard Library Constants Dataset - pim_stdlib_const_identifiers = ( - 'EOL', - ) - -# I S O S t a n d a r d L i b r a r y D a t a s e t s - - # ISO Modula-2 Standard Library Modules Dataset - iso_stdlib_module_identifiers = ( - # TO DO - ) - - # ISO Modula-2 Standard Library Types Dataset - iso_stdlib_type_identifiers = ( - # TO DO - ) - - # ISO Modula-2 Standard Library Procedures Dataset - iso_stdlib_proc_identifiers = ( - # TO DO - ) - - # ISO Modula-2 Standard Library Variables Dataset - iso_stdlib_var_identifiers = ( - # TO DO - ) - - # ISO Modula-2 Standard Library Constants Dataset - iso_stdlib_const_identifiers = ( - # TO DO - ) - -# M 2 R 1 0 S t a n d a r d L i b r a r y D a t a s e t s - - # Modula-2 R10 Standard Library ADTs Dataset - m2r10_stdlib_adt_identifiers = ( - 'BCD', 'LONGBCD', 'BITSET', 'SHORTBITSET', 'LONGBITSET', - 'LONGLONGBITSET', 'COMPLEX', 'LONGCOMPLEX', 'SHORTCARD', 'LONGLONGCARD', - 'SHORTINT', 'LONGLONGINT', 'POSINT', 'SHORTPOSINT', 'LONGPOSINT', - 'LONGLONGPOSINT', 'BITSET8', 'BITSET16', 'BITSET32', 'BITSET64', - 'BITSET128', 'BS8', 'BS16', 'BS32', 'BS64', 'BS128', 'CARDINAL8', - 'CARDINAL16', 'CARDINAL32', 'CARDINAL64', 'CARDINAL128', 'CARD8', - 'CARD16', 'CARD32', 'CARD64', 'CARD128', 'INTEGER8', 'INTEGER16', - 'INTEGER32', 'INTEGER64', 'INTEGER128', 'INT8', 'INT16', 'INT32', - 'INT64', 'INT128', 'STRING', 'UNISTRING', - ) - - # Modula-2 R10 Standard Library Blueprints Dataset - m2r10_stdlib_blueprint_identifiers = ( - 'ProtoRoot', 'ProtoComputational', 'ProtoNumeric', 'ProtoScalar', - 'ProtoNonScalar', 'ProtoCardinal', 'ProtoInteger', 'ProtoReal', - 'ProtoComplex', 'ProtoVector', 'ProtoTuple', 'ProtoCompArray', - 'ProtoCollection', 'ProtoStaticArray', 'ProtoStaticSet', - 'ProtoStaticString', 'ProtoArray', 'ProtoString', 'ProtoSet', - 'ProtoMultiSet', 
'ProtoDictionary', 'ProtoMultiDict', 'ProtoExtension', - 'ProtoIO', 'ProtoCardMath', 'ProtoIntMath', 'ProtoRealMath', - ) - - # Modula-2 R10 Standard Library Modules Dataset - m2r10_stdlib_module_identifiers = ( - 'ASCII', 'BooleanIO', 'CharIO', 'UnicharIO', 'OctetIO', - 'CardinalIO', 'LongCardIO', 'IntegerIO', 'LongIntIO', 'RealIO', - 'LongRealIO', 'BCDIO', 'LongBCDIO', 'CardMath', 'LongCardMath', - 'IntMath', 'LongIntMath', 'RealMath', 'LongRealMath', 'BCDMath', - 'LongBCDMath', 'FileIO', 'FileSystem', 'Storage', 'IOSupport', - ) - - # Modula-2 R10 Standard Library Types Dataset - m2r10_stdlib_type_identifiers = ( - 'File', 'Status', - # TO BE COMPLETED - ) - - # Modula-2 R10 Standard Library Procedures Dataset - m2r10_stdlib_proc_identifiers = ( - 'ALLOCATE', 'DEALLOCATE', 'SIZE', - # TO BE COMPLETED - ) - - # Modula-2 R10 Standard Library Variables Dataset - m2r10_stdlib_var_identifiers = ( - 'stdIn', 'stdOut', 'stdErr', - ) - - # Modula-2 R10 Standard Library Constants Dataset - m2r10_stdlib_const_identifiers = ( - 'pi', 'tau', - ) - -# D i a l e c t s - - # Dialect modes - dialects = ( - 'unknown', - 'm2pim', 'm2iso', 'm2r10', 'objm2', - 'm2iso+aglet', 'm2pim+gm2', 'm2iso+p1', 'm2iso+xds', - ) - -# D a t a b a s e s - - # Lexemes to Mark as Errors Database - lexemes_to_reject_db = { - # Lexemes to reject for unknown dialect - 'unknown': ( - # LEAVE THIS EMPTY - ), - # Lexemes to reject for PIM Modula-2 - 'm2pim': ( - pim_lexemes_to_reject, - ), - # Lexemes to reject for ISO Modula-2 - 'm2iso': ( - iso_lexemes_to_reject, - ), - # Lexemes to reject for Modula-2 R10 - 'm2r10': ( - m2r10_lexemes_to_reject, - ), - # Lexemes to reject for Objective Modula-2 - 'objm2': ( - objm2_lexemes_to_reject, - ), - # Lexemes to reject for Aglet Modula-2 - 'm2iso+aglet': ( - iso_lexemes_to_reject, - ), - # Lexemes to reject for GNU Modula-2 - 'm2pim+gm2': ( - pim_lexemes_to_reject, - ), - # Lexemes to reject for p1 Modula-2 - 'm2iso+p1': ( - iso_lexemes_to_reject, - ), - # Lexemes to reject for XDS Modula-2 - 'm2iso+xds': ( - iso_lexemes_to_reject, - ), - } - - # Reserved Words Database - reserved_words_db = { - # Reserved words for unknown dialect - 'unknown': ( - common_reserved_words, - pim_additional_reserved_words, - iso_additional_reserved_words, - m2r10_additional_reserved_words, - ), - - # Reserved words for PIM Modula-2 - 'm2pim': ( - common_reserved_words, - pim_additional_reserved_words, - ), - - # Reserved words for Modula-2 R10 - 'm2iso': ( - common_reserved_words, - iso_additional_reserved_words, - ), - - # Reserved words for ISO Modula-2 - 'm2r10': ( - common_reserved_words, - m2r10_additional_reserved_words, - ), - - # Reserved words for Objective Modula-2 - 'objm2': ( - common_reserved_words, - m2r10_additional_reserved_words, - objm2_additional_reserved_words, - ), - - # Reserved words for Aglet Modula-2 Extensions - 'm2iso+aglet': ( - common_reserved_words, - iso_additional_reserved_words, - aglet_additional_reserved_words, - ), - - # Reserved words for GNU Modula-2 Extensions - 'm2pim+gm2': ( - common_reserved_words, - pim_additional_reserved_words, - gm2_additional_reserved_words, - ), - - # Reserved words for p1 Modula-2 Extensions - 'm2iso+p1': ( - common_reserved_words, - iso_additional_reserved_words, - p1_additional_reserved_words, - ), - - # Reserved words for XDS Modula-2 Extensions - 'm2iso+xds': ( - common_reserved_words, - iso_additional_reserved_words, - xds_additional_reserved_words, - ), - } - - # Builtins Database - builtins_db = { - # Builtins for unknown dialect - 
'unknown': ( - common_builtins, - pim_additional_builtins, - iso_additional_builtins, - m2r10_additional_builtins, - ), - - # Builtins for PIM Modula-2 - 'm2pim': ( - common_builtins, - pim_additional_builtins, - ), - - # Builtins for ISO Modula-2 - 'm2iso': ( - common_builtins, - iso_additional_builtins, - ), - - # Builtins for ISO Modula-2 - 'm2r10': ( - common_builtins, - m2r10_additional_builtins, - ), - - # Builtins for Objective Modula-2 - 'objm2': ( - common_builtins, - m2r10_additional_builtins, - objm2_additional_builtins, - ), - - # Builtins for Aglet Modula-2 Extensions - 'm2iso+aglet': ( - common_builtins, - iso_additional_builtins, - aglet_additional_builtins, - ), - - # Builtins for GNU Modula-2 Extensions - 'm2pim+gm2': ( - common_builtins, - pim_additional_builtins, - gm2_additional_builtins, - ), - - # Builtins for p1 Modula-2 Extensions - 'm2iso+p1': ( - common_builtins, - iso_additional_builtins, - p1_additional_builtins, - ), - - # Builtins for XDS Modula-2 Extensions - 'm2iso+xds': ( - common_builtins, - iso_additional_builtins, - xds_additional_builtins, - ), - } - - # Pseudo-Module Builtins Database - pseudo_builtins_db = { - # Builtins for unknown dialect - 'unknown': ( - common_pseudo_builtins, - pim_additional_pseudo_builtins, - iso_additional_pseudo_builtins, - m2r10_additional_pseudo_builtins, - ), - - # Builtins for PIM Modula-2 - 'm2pim': ( - common_pseudo_builtins, - pim_additional_pseudo_builtins, - ), - - # Builtins for ISO Modula-2 - 'm2iso': ( - common_pseudo_builtins, - iso_additional_pseudo_builtins, - ), - - # Builtins for ISO Modula-2 - 'm2r10': ( - common_pseudo_builtins, - m2r10_additional_pseudo_builtins, - ), - - # Builtins for Objective Modula-2 - 'objm2': ( - common_pseudo_builtins, - m2r10_additional_pseudo_builtins, - objm2_additional_pseudo_builtins, - ), - - # Builtins for Aglet Modula-2 Extensions - 'm2iso+aglet': ( - common_pseudo_builtins, - iso_additional_pseudo_builtins, - aglet_additional_pseudo_builtins, - ), - - # Builtins for GNU Modula-2 Extensions - 'm2pim+gm2': ( - common_pseudo_builtins, - pim_additional_pseudo_builtins, - gm2_additional_pseudo_builtins, - ), - - # Builtins for p1 Modula-2 Extensions - 'm2iso+p1': ( - common_pseudo_builtins, - iso_additional_pseudo_builtins, - p1_additional_pseudo_builtins, - ), - - # Builtins for XDS Modula-2 Extensions - 'm2iso+xds': ( - common_pseudo_builtins, - iso_additional_pseudo_builtins, - xds_additional_pseudo_builtins, - ), - } - - # Standard Library ADTs Database - stdlib_adts_db = { - # Empty entry for unknown dialect - 'unknown': ( - # LEAVE THIS EMPTY - ), - # Standard Library ADTs for PIM Modula-2 - 'm2pim': ( - # No first class library types - ), - - # Standard Library ADTs for ISO Modula-2 - 'm2iso': ( - # No first class library types - ), - - # Standard Library ADTs for Modula-2 R10 - 'm2r10': ( - m2r10_stdlib_adt_identifiers, - ), - - # Standard Library ADTs for Objective Modula-2 - 'objm2': ( - m2r10_stdlib_adt_identifiers, - ), - - # Standard Library ADTs for Aglet Modula-2 - 'm2iso+aglet': ( - # No first class library types - ), - - # Standard Library ADTs for GNU Modula-2 - 'm2pim+gm2': ( - # No first class library types - ), - - # Standard Library ADTs for p1 Modula-2 - 'm2iso+p1': ( - # No first class library types - ), - - # Standard Library ADTs for XDS Modula-2 - 'm2iso+xds': ( - # No first class library types - ), - } - - # Standard Library Modules Database - stdlib_modules_db = { - # Empty entry for unknown dialect - 'unknown': ( - # LEAVE THIS EMPTY - ), - # 
Standard Library Modules for PIM Modula-2 - 'm2pim': ( - pim_stdlib_module_identifiers, - ), - - # Standard Library Modules for ISO Modula-2 - 'm2iso': ( - iso_stdlib_module_identifiers, - ), - - # Standard Library Modules for Modula-2 R10 - 'm2r10': ( - m2r10_stdlib_blueprint_identifiers, - m2r10_stdlib_module_identifiers, - m2r10_stdlib_adt_identifiers, - ), - - # Standard Library Modules for Objective Modula-2 - 'objm2': ( - m2r10_stdlib_blueprint_identifiers, - m2r10_stdlib_module_identifiers, - ), - - # Standard Library Modules for Aglet Modula-2 - 'm2iso+aglet': ( - iso_stdlib_module_identifiers, - ), - - # Standard Library Modules for GNU Modula-2 - 'm2pim+gm2': ( - pim_stdlib_module_identifiers, - ), - - # Standard Library Modules for p1 Modula-2 - 'm2iso+p1': ( - iso_stdlib_module_identifiers, - ), - - # Standard Library Modules for XDS Modula-2 - 'm2iso+xds': ( - iso_stdlib_module_identifiers, - ), - } - - # Standard Library Types Database - stdlib_types_db = { - # Empty entry for unknown dialect - 'unknown': ( - # LEAVE THIS EMPTY - ), - # Standard Library Types for PIM Modula-2 - 'm2pim': ( - pim_stdlib_type_identifiers, - ), - - # Standard Library Types for ISO Modula-2 - 'm2iso': ( - iso_stdlib_type_identifiers, - ), - - # Standard Library Types for Modula-2 R10 - 'm2r10': ( - m2r10_stdlib_type_identifiers, - ), - - # Standard Library Types for Objective Modula-2 - 'objm2': ( - m2r10_stdlib_type_identifiers, - ), - - # Standard Library Types for Aglet Modula-2 - 'm2iso+aglet': ( - iso_stdlib_type_identifiers, - ), - - # Standard Library Types for GNU Modula-2 - 'm2pim+gm2': ( - pim_stdlib_type_identifiers, - ), - - # Standard Library Types for p1 Modula-2 - 'm2iso+p1': ( - iso_stdlib_type_identifiers, - ), - - # Standard Library Types for XDS Modula-2 - 'm2iso+xds': ( - iso_stdlib_type_identifiers, - ), - } - - # Standard Library Procedures Database - stdlib_procedures_db = { - # Empty entry for unknown dialect - 'unknown': ( - # LEAVE THIS EMPTY - ), - # Standard Library Procedures for PIM Modula-2 - 'm2pim': ( - pim_stdlib_proc_identifiers, - ), - - # Standard Library Procedures for ISO Modula-2 - 'm2iso': ( - iso_stdlib_proc_identifiers, - ), - - # Standard Library Procedures for Modula-2 R10 - 'm2r10': ( - m2r10_stdlib_proc_identifiers, - ), - - # Standard Library Procedures for Objective Modula-2 - 'objm2': ( - m2r10_stdlib_proc_identifiers, - ), - - # Standard Library Procedures for Aglet Modula-2 - 'm2iso+aglet': ( - iso_stdlib_proc_identifiers, - ), - - # Standard Library Procedures for GNU Modula-2 - 'm2pim+gm2': ( - pim_stdlib_proc_identifiers, - ), - - # Standard Library Procedures for p1 Modula-2 - 'm2iso+p1': ( - iso_stdlib_proc_identifiers, - ), - - # Standard Library Procedures for XDS Modula-2 - 'm2iso+xds': ( - iso_stdlib_proc_identifiers, - ), - } - - # Standard Library Variables Database - stdlib_variables_db = { - # Empty entry for unknown dialect - 'unknown': ( - # LEAVE THIS EMPTY - ), - # Standard Library Variables for PIM Modula-2 - 'm2pim': ( - pim_stdlib_var_identifiers, - ), - - # Standard Library Variables for ISO Modula-2 - 'm2iso': ( - iso_stdlib_var_identifiers, - ), - - # Standard Library Variables for Modula-2 R10 - 'm2r10': ( - m2r10_stdlib_var_identifiers, - ), - - # Standard Library Variables for Objective Modula-2 - 'objm2': ( - m2r10_stdlib_var_identifiers, - ), - - # Standard Library Variables for Aglet Modula-2 - 'm2iso+aglet': ( - iso_stdlib_var_identifiers, - ), - - # Standard Library Variables for GNU Modula-2 - 'm2pim+gm2': ( - 
pim_stdlib_var_identifiers, - ), - - # Standard Library Variables for p1 Modula-2 - 'm2iso+p1': ( - iso_stdlib_var_identifiers, - ), - - # Standard Library Variables for XDS Modula-2 - 'm2iso+xds': ( - iso_stdlib_var_identifiers, - ), - } - - # Standard Library Constants Database - stdlib_constants_db = { - # Empty entry for unknown dialect - 'unknown': ( - # LEAVE THIS EMPTY - ), - # Standard Library Constants for PIM Modula-2 - 'm2pim': ( - pim_stdlib_const_identifiers, - ), - - # Standard Library Constants for ISO Modula-2 - 'm2iso': ( - iso_stdlib_const_identifiers, - ), - - # Standard Library Constants for Modula-2 R10 - 'm2r10': ( - m2r10_stdlib_const_identifiers, - ), - - # Standard Library Constants for Objective Modula-2 - 'objm2': ( - m2r10_stdlib_const_identifiers, - ), - - # Standard Library Constants for Aglet Modula-2 - 'm2iso+aglet': ( - iso_stdlib_const_identifiers, - ), - - # Standard Library Constants for GNU Modula-2 - 'm2pim+gm2': ( - pim_stdlib_const_identifiers, - ), - - # Standard Library Constants for p1 Modula-2 - 'm2iso+p1': ( - iso_stdlib_const_identifiers, - ), - - # Standard Library Constants for XDS Modula-2 - 'm2iso+xds': ( - iso_stdlib_const_identifiers, - ), - } - -# M e t h o d s - - # initialise a lexer instance - def __init__(self, **options): - # - # check dialect options - # - dialects = get_list_opt(options, 'dialect', []) - # - for dialect_option in dialects: - if dialect_option in self.dialects[1:-1]: - # valid dialect option found - self.set_dialect(dialect_option) - break - # - # Fallback Mode (DEFAULT) - else: - # no valid dialect option - self.set_dialect('unknown') - # - self.dialect_set_by_tag = False - # - # check style options - # - styles = get_list_opt(options, 'style', []) - # - # use lowercase mode for Algol style - if 'algol' in styles or 'algol_nu' in styles: - self.algol_publication_mode = True - else: - self.algol_publication_mode = False - # - # Check option flags - # - self.treat_stdlib_adts_as_builtins = get_bool_opt( - options, 'treat_stdlib_adts_as_builtins', True) - # - # call superclass initialiser - RegexLexer.__init__(self, **options) - - # Set lexer to a specified dialect - def set_dialect(self, dialect_id): - # - # if __debug__: - # print 'entered set_dialect with arg: ', dialect_id - # - # check dialect name against known dialects - if dialect_id not in self.dialects: - dialect = 'unknown' # default - else: - dialect = dialect_id - # - # compose lexemes to reject set - lexemes_to_reject_set = set() - # add each list of reject lexemes for this dialect - for list in self.lexemes_to_reject_db[dialect]: - lexemes_to_reject_set.update(set(list)) - # - # compose reserved words set - reswords_set = set() - # add each list of reserved words for this dialect - for list in self.reserved_words_db[dialect]: - reswords_set.update(set(list)) - # - # compose builtins set - builtins_set = set() - # add each list of builtins for this dialect excluding reserved words - for list in self.builtins_db[dialect]: - builtins_set.update(set(list).difference(reswords_set)) - # - # compose pseudo-builtins set - pseudo_builtins_set = set() - # add each list of builtins for this dialect excluding reserved words - for list in self.pseudo_builtins_db[dialect]: - pseudo_builtins_set.update(set(list).difference(reswords_set)) - # - # compose ADTs set - adts_set = set() - # add each list of ADTs for this dialect excluding reserved words - for list in self.stdlib_adts_db[dialect]: - adts_set.update(set(list).difference(reswords_set)) - # - # compose modules 
set - modules_set = set() - # add each list of builtins for this dialect excluding builtins - for list in self.stdlib_modules_db[dialect]: - modules_set.update(set(list).difference(builtins_set)) - # - # compose types set - types_set = set() - # add each list of types for this dialect excluding builtins - for list in self.stdlib_types_db[dialect]: - types_set.update(set(list).difference(builtins_set)) - # - # compose procedures set - procedures_set = set() - # add each list of procedures for this dialect excluding builtins - for list in self.stdlib_procedures_db[dialect]: - procedures_set.update(set(list).difference(builtins_set)) - # - # compose variables set - variables_set = set() - # add each list of variables for this dialect excluding builtins - for list in self.stdlib_variables_db[dialect]: - variables_set.update(set(list).difference(builtins_set)) - # - # compose constants set - constants_set = set() - # add each list of constants for this dialect excluding builtins - for list in self.stdlib_constants_db[dialect]: - constants_set.update(set(list).difference(builtins_set)) - # - # update lexer state - self.dialect = dialect - self.lexemes_to_reject = lexemes_to_reject_set - self.reserved_words = reswords_set - self.builtins = builtins_set - self.pseudo_builtins = pseudo_builtins_set - self.adts = adts_set - self.modules = modules_set - self.types = types_set - self.procedures = procedures_set - self.variables = variables_set - self.constants = constants_set - # - # if __debug__: - # print 'exiting set_dialect' - # print ' self.dialect: ', self.dialect - # print ' self.lexemes_to_reject: ', self.lexemes_to_reject - # print ' self.reserved_words: ', self.reserved_words - # print ' self.builtins: ', self.builtins - # print ' self.pseudo_builtins: ', self.pseudo_builtins - # print ' self.adts: ', self.adts - # print ' self.modules: ', self.modules - # print ' self.types: ', self.types - # print ' self.procedures: ', self.procedures - # print ' self.variables: ', self.variables - # print ' self.types: ', self.types - # print ' self.constants: ', self.constants - - # Extracts a dialect name from a dialect tag comment string and checks - # the extracted name against known dialects. If a match is found, the - # matching name is returned, otherwise dialect id 'unknown' is returned - def get_dialect_from_dialect_tag(self, dialect_tag): - # - # if __debug__: - # print 'entered get_dialect_from_dialect_tag with arg: ', dialect_tag - # - # constants - left_tag_delim = '(*!' 
- right_tag_delim = '*)' - left_tag_delim_len = len(left_tag_delim) - right_tag_delim_len = len(right_tag_delim) - indicator_start = left_tag_delim_len - indicator_end = -(right_tag_delim_len) - # - # check comment string for dialect indicator - if len(dialect_tag) > (left_tag_delim_len + right_tag_delim_len) \ - and dialect_tag.startswith(left_tag_delim) \ - and dialect_tag.endswith(right_tag_delim): - # - # if __debug__: - # print 'dialect tag found' - # - # extract dialect indicator - indicator = dialect_tag[indicator_start:indicator_end] - # - # if __debug__: - # print 'extracted: ', indicator - # - # check against known dialects - for index in range(1, len(self.dialects)): - # - # if __debug__: - # print 'dialects[', index, ']: ', self.dialects[index] - # - if indicator == self.dialects[index]: - # - # if __debug__: - # print 'matching dialect found' - # - # indicator matches known dialect - return indicator - else: - # indicator does not match any dialect - return 'unknown' # default - else: - # invalid indicator string - return 'unknown' # default - - # intercept the token stream, modify token attributes and return them - def get_tokens_unprocessed(self, text): - for index, token, value in RegexLexer.get_tokens_unprocessed(self, text): - # - # check for dialect tag if dialect has not been set by tag - if not self.dialect_set_by_tag and token == Comment.Special: - indicated_dialect = self.get_dialect_from_dialect_tag(value) - if indicated_dialect != 'unknown': - # token is a dialect indicator - # reset reserved words and builtins - self.set_dialect(indicated_dialect) - self.dialect_set_by_tag = True - # - # check for reserved words, predefined and stdlib identifiers - if token is Name: - if value in self.reserved_words: - token = Keyword.Reserved - if self.algol_publication_mode: - value = value.lower() - # - elif value in self.builtins: - token = Name.Builtin - if self.algol_publication_mode: - value = value.lower() - # - elif value in self.pseudo_builtins: - token = Name.Builtin.Pseudo - if self.algol_publication_mode: - value = value.lower() - # - elif value in self.adts: - if not self.treat_stdlib_adts_as_builtins: - token = Name.Namespace - else: - token = Name.Builtin.Pseudo - if self.algol_publication_mode: - value = value.lower() - # - elif value in self.modules: - token = Name.Namespace - # - elif value in self.types: - token = Name.Class - # - elif value in self.procedures: - token = Name.Function - # - elif value in self.variables: - token = Name.Variable - # - elif value in self.constants: - token = Name.Constant - # - elif token in Number: - # - # mark prefix number literals as error for PIM and ISO dialects - if self.dialect not in ('unknown', 'm2r10', 'objm2'): - if "'" in value or value[0:2] in ('0b', '0x', '0u'): - token = Error - # - elif self.dialect in ('m2r10', 'objm2'): - # mark base-8 number literals as errors for M2 R10 and ObjM2 - if token is Number.Oct: - token = Error - # mark suffix base-16 literals as errors for M2 R10 and ObjM2 - elif token is Number.Hex and 'H' in value: - token = Error - # mark real numbers with E as errors for M2 R10 and ObjM2 - elif token is Number.Float and 'E' in value: - token = Error - # - elif token in Comment: - # - # mark single line comment as error for PIM and ISO dialects - if token is Comment.Single: - if self.dialect not in ('unknown', 'm2r10', 'objm2'): - token = Error - # - if token is Comment.Preproc: - # mark ISO pragma as error for PIM dialects - if value.startswith('<*') and \ - self.dialect.startswith('m2pim'): - 
token = Error - # mark PIM pragma as comment for other dialects - elif value.startswith('(*$') and \ - self.dialect != 'unknown' and \ - not self.dialect.startswith('m2pim'): - token = Comment.Multiline - # - else: # token is neither Name nor Comment - # - # mark lexemes matching the dialect's error token set as errors - if value in self.lexemes_to_reject: - token = Error - # - # substitute lexemes when in Algol mode - if self.algol_publication_mode: - if value == '#': - value = u'≠' - elif value == '<=': - value = u'≤' - elif value == '>=': - value = u'≥' - elif value == '==': - value = u'≡' - elif value == '*.': - value = u'•' - - # return result - yield index, token, value +# -*- coding: utf-8 -*- +""" + pygments.lexers.modula2 + ~~~~~~~~~~~~~~~~~~~~~~~ + + Multi-Dialect Lexer for Modula-2. + + :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, include +from pygments.util import get_bool_opt, get_list_opt +from pygments.token import Text, Comment, Operator, Keyword, Name, \ + String, Number, Punctuation, Error + +__all__ = ['Modula2Lexer'] + + +# Multi-Dialect Modula-2 Lexer +class Modula2Lexer(RegexLexer): + """ + For `Modula-2 `_ source code. + + The Modula-2 lexer supports several dialects. By default, it operates in + fallback mode, recognising the *combined* literals, punctuation symbols + and operators of all supported dialects, and the *combined* reserved words + and builtins of PIM Modula-2, ISO Modula-2 and Modula-2 R10, while not + differentiating between library defined identifiers. + + To select a specific dialect, a dialect option may be passed + or a dialect tag may be embedded into a source file. + + Dialect Options: + + `m2pim` + Select PIM Modula-2 dialect. + `m2iso` + Select ISO Modula-2 dialect. + `m2r10` + Select Modula-2 R10 dialect. + `objm2` + Select Objective Modula-2 dialect. + + The PIM and ISO dialect options may be qualified with a language extension. + + Language Extensions: + + `+aglet` + Select Aglet Modula-2 extensions, available with m2iso. + `+gm2` + Select GNU Modula-2 extensions, available with m2pim. + `+p1` + Select p1 Modula-2 extensions, available with m2iso. + `+xds` + Select XDS Modula-2 extensions, available with m2iso. + + + Passing a Dialect Option via Unix Commandline Interface + + Dialect options may be passed to the lexer using the `dialect` key. + Only one such option should be passed. If multiple dialect options are + passed, the first valid option is used, any subsequent options are ignored. + + Examples: + + `$ pygmentize -O full,dialect=m2iso -f html -o /path/to/output /path/to/input` + Use ISO dialect to render input to HTML output + `$ pygmentize -O full,dialect=m2iso+p1 -f rtf -o /path/to/output /path/to/input` + Use ISO dialect with p1 extensions to render input to RTF output + + + Embedding a Dialect Option within a source file + + A dialect option may be embedded in a source file in form of a dialect + tag, a specially formatted comment that specifies a dialect option. + + Dialect Tag EBNF:: + + dialectTag : + OpeningCommentDelim Prefix dialectOption ClosingCommentDelim ; + + dialectOption : + 'm2pim' | 'm2iso' | 'm2r10' | 'objm2' | + 'm2iso+aglet' | 'm2pim+gm2' | 'm2iso+p1' | 'm2iso+xds' ; + + Prefix : '!' ; + + OpeningCommentDelim : '(*' ; + + ClosingCommentDelim : '*)' ; + + No whitespace is permitted between the tokens of a dialect tag. 
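+    A minimal sketch of the tag check this grammar implies (editor's
+    example, not part of the upstream file; the lexer's actual
+    implementation is ``get_dialect_from_dialect_tag`` below, and
+    ``dialect_from_tag`` here is a hypothetical stand-alone helper)::
+
+        KNOWN_DIALECTS = ('m2pim', 'm2iso', 'm2r10', 'objm2',
+                          'm2iso+aglet', 'm2pim+gm2', 'm2iso+p1',
+                          'm2iso+xds')
+
+        def dialect_from_tag(comment):
+            # a tag is exactly '(*!' + dialectOption + '*)',
+            # with no whitespace between the tokens
+            if comment.startswith('(*!') and comment.endswith('*)'):
+                option = comment[3:-2]
+                if option in KNOWN_DIALECTS:
+                    return option
+            return 'unknown'
+
+        dialect_from_tag('(*!m2r10*)')     # -> 'm2r10'
+        dialect_from_tag('(*! m2r10 *)')   # -> 'unknown' (whitespace)
+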
+ + In the event that a source file contains multiple dialect tags, the first + tag that contains a valid dialect option will be used and any subsequent + dialect tags will be ignored. Ideally, a dialect tag should be placed + at the beginning of a source file. + + An embedded dialect tag overrides a dialect option set via command line. + + Examples: + + ``(*!m2r10*) DEFINITION MODULE Foobar; ...`` + Use Modula2 R10 dialect to render this source file. + ``(*!m2pim+gm2*) DEFINITION MODULE Bazbam; ...`` + Use PIM dialect with GNU extensions to render this source file. + + + Algol Publication Mode: + + In Algol publication mode, source text is rendered for publication of + algorithms in scientific papers and academic texts, following the format + of the Revised Algol-60 Language Report. It is activated by passing + one of two corresponding styles as an option: + + `algol` + render reserved words lowercase underline boldface + and builtins lowercase boldface italic + `algol_nu` + render reserved words lowercase boldface (no underlining) + and builtins lowercase boldface italic + + The lexer automatically performs the required lowercase conversion when + this mode is activated. + + Example: + + ``$ pygmentize -O full,style=algol -f latex -o /path/to/output /path/to/input`` + Render input file in Algol publication mode to LaTeX output. + + + Rendering Mode of First Class ADT Identifiers: + + The rendering of standard library first class ADT identifiers is controlled + by option flag "treat_stdlib_adts_as_builtins". + + When this option is turned on, standard library ADT identifiers are rendered + as builtins. When it is turned off, they are rendered as ordinary library + identifiers. + + `treat_stdlib_adts_as_builtins` (default: On) + + The option is useful for dialects that support ADTs as first class objects + and provide ADTs in the standard library that would otherwise be built-in. + + At present, only Modula-2 R10 supports library ADTs as first class objects + and therefore, no ADT identifiers are defined for any other dialects. + + Example: + + ``$ pygmentize -O full,dialect=m2r10,treat_stdlib_adts_as_builtins=Off ...`` + Render standard library ADTs as ordinary library types. + + .. versionadded:: 1.3 + + .. versionchanged:: 2.1 + Added multi-dialect support. 
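+    Library usage (an editor's sketch, not part of the upstream diff; the
+    input string is hypothetical, and ``dialect`` takes the same values as
+    the command-line option documented above)::
+
+        from pygments import highlight
+        from pygments.formatters import HtmlFormatter
+        from pygments.lexers.modula2 import Modula2Lexer
+
+        code = 'MODULE Hello; BEGIN END Hello.'
+
+        # an embedded tag such as (*!m2r10*) in the source would
+        # override the dialect selected here
+        lexer = Modula2Lexer(dialect='m2iso+p1')
+        print(highlight(code, lexer, HtmlFormatter()))
+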
+ """ + name = 'Modula-2' + aliases = ['modula2', 'm2'] + filenames = ['*.def', '*.mod'] + mimetypes = ['text/x-modula2'] + + flags = re.MULTILINE | re.DOTALL + + tokens = { + 'whitespace': [ + (r'\n+', Text), # blank lines + (r'\s+', Text), # whitespace + ], + 'dialecttags': [ + # PIM Dialect Tag + (r'\(\*!m2pim\*\)', Comment.Special), + # ISO Dialect Tag + (r'\(\*!m2iso\*\)', Comment.Special), + # M2R10 Dialect Tag + (r'\(\*!m2r10\*\)', Comment.Special), + # ObjM2 Dialect Tag + (r'\(\*!objm2\*\)', Comment.Special), + # Aglet Extensions Dialect Tag + (r'\(\*!m2iso\+aglet\*\)', Comment.Special), + # GNU Extensions Dialect Tag + (r'\(\*!m2pim\+gm2\*\)', Comment.Special), + # p1 Extensions Dialect Tag + (r'\(\*!m2iso\+p1\*\)', Comment.Special), + # XDS Extensions Dialect Tag + (r'\(\*!m2iso\+xds\*\)', Comment.Special), + ], + 'identifiers': [ + (r'([a-zA-Z_$][\w$]*)', Name), + ], + 'prefixed_number_literals': [ + # + # Base-2, whole number + (r'0b[01]+(\'[01]+)*', Number.Bin), + # + # Base-16, whole number + (r'0[ux][0-9A-F]+(\'[0-9A-F]+)*', Number.Hex), + ], + 'plain_number_literals': [ + # + # Base-10, real number with exponent + (r'[0-9]+(\'[0-9]+)*' # integral part + r'\.[0-9]+(\'[0-9]+)*' # fractional part + r'[eE][+-]?[0-9]+(\'[0-9]+)*', # exponent + Number.Float), + # + # Base-10, real number without exponent + (r'[0-9]+(\'[0-9]+)*' # integral part + r'\.[0-9]+(\'[0-9]+)*', # fractional part + Number.Float), + # + # Base-10, whole number + (r'[0-9]+(\'[0-9]+)*', Number.Integer), + ], + 'suffixed_number_literals': [ + # + # Base-8, whole number + (r'[0-7]+B', Number.Oct), + # + # Base-8, character code + (r'[0-7]+C', Number.Oct), + # + # Base-16, number + (r'[0-9A-F]+H', Number.Hex), + ], + 'string_literals': [ + (r"'(\\\\|\\'|[^'])*'", String), # single quoted string + (r'"(\\\\|\\"|[^"])*"', String), # double quoted string + ], + 'digraph_operators': [ + # Dot Product Operator + (r'\*\.', Operator), + # Array Concatenation Operator + (r'\+>', Operator), # M2R10 + ObjM2 + # Inequality Operator + (r'<>', Operator), # ISO + PIM + # Less-Or-Equal, Subset + (r'<=', Operator), + # Greater-Or-Equal, Superset + (r'>=', Operator), + # Identity Operator + (r'==', Operator), # M2R10 + ObjM2 + # Type Conversion Operator + (r'::', Operator), # M2R10 + ObjM2 + # Assignment Symbol + (r':=', Operator), + # Postfix Increment Mutator + (r'\+\+', Operator), # M2R10 + ObjM2 + # Postfix Decrement Mutator + (r'--', Operator), # M2R10 + ObjM2 + ], + 'unigraph_operators': [ + # Arithmetic Operators + (r'[+-]', Operator), + (r'[*/]', Operator), + # ISO 80000-2 compliant Set Difference Operator + (r'\\', Operator), # M2R10 + ObjM2 + # Relational Operators + (r'[=#<>]', Operator), + # Dereferencing Operator + (r'\^', Operator), + # Dereferencing Operator Synonym + (r'@', Operator), # ISO + # Logical AND Operator Synonym + (r'&', Operator), # PIM + ISO + # Logical NOT Operator Synonym + (r'~', Operator), # PIM + ISO + # Smalltalk Message Prefix + (r'`', Operator), # ObjM2 + ], + 'digraph_punctuation': [ + # Range Constructor + (r'\.\.', Punctuation), + # Opening Chevron Bracket + (r'<<', Punctuation), # M2R10 + ISO + # Closing Chevron Bracket + (r'>>', Punctuation), # M2R10 + ISO + # Blueprint Punctuation + (r'->', Punctuation), # M2R10 + ISO + # Distinguish |# and # in M2 R10 + (r'\|#', Punctuation), + # Distinguish ## and # in M2 R10 + (r'##', Punctuation), + # Distinguish |* and * in M2 R10 + (r'\|\*', Punctuation), + ], + 'unigraph_punctuation': [ + # Common Punctuation + (r'[()\[\]{},.:;|]', Punctuation), 
+ # Case Label Separator Synonym + (r'!', Punctuation), # ISO + # Blueprint Punctuation + (r'\?', Punctuation), # M2R10 + ObjM2 + ], + 'comments': [ + # Single Line Comment + (r'^//.*?\n', Comment.Single), # M2R10 + ObjM2 + # Block Comment + (r'\(\*([^$].*?)\*\)', Comment.Multiline), + # Template Block Comment + (r'/\*(.*?)\*/', Comment.Multiline), # M2R10 + ObjM2 + ], + 'pragmas': [ + # ISO Style Pragmas + (r'<\*.*?\*>', Comment.Preproc), # ISO, M2R10 + ObjM2 + # Pascal Style Pragmas + (r'\(\*\$.*?\*\)', Comment.Preproc), # PIM + ], + 'root': [ + include('whitespace'), + include('dialecttags'), + include('pragmas'), + include('comments'), + include('identifiers'), + include('suffixed_number_literals'), # PIM + ISO + include('prefixed_number_literals'), # M2R10 + ObjM2 + include('plain_number_literals'), + include('string_literals'), + include('digraph_punctuation'), + include('digraph_operators'), + include('unigraph_punctuation'), + include('unigraph_operators'), + ] + } + +# C o m m o n D a t a s e t s + + # Common Reserved Words Dataset + common_reserved_words = ( + # 37 common reserved words + 'AND', 'ARRAY', 'BEGIN', 'BY', 'CASE', 'CONST', 'DEFINITION', 'DIV', + 'DO', 'ELSE', 'ELSIF', 'END', 'EXIT', 'FOR', 'FROM', 'IF', + 'IMPLEMENTATION', 'IMPORT', 'IN', 'LOOP', 'MOD', 'MODULE', 'NOT', + 'OF', 'OR', 'POINTER', 'PROCEDURE', 'RECORD', 'REPEAT', 'RETURN', + 'SET', 'THEN', 'TO', 'TYPE', 'UNTIL', 'VAR', 'WHILE', + ) + + # Common Builtins Dataset + common_builtins = ( + # 16 common builtins + 'ABS', 'BOOLEAN', 'CARDINAL', 'CHAR', 'CHR', 'FALSE', 'INTEGER', + 'LONGINT', 'LONGREAL', 'MAX', 'MIN', 'NIL', 'ODD', 'ORD', 'REAL', + 'TRUE', + ) + + # Common Pseudo-Module Builtins Dataset + common_pseudo_builtins = ( + # 4 common pseudo builtins + 'ADDRESS', 'BYTE', 'WORD', 'ADR' + ) + +# P I M M o d u l a - 2 D a t a s e t s + + # Lexemes to Mark as Error Tokens for PIM Modula-2 + pim_lexemes_to_reject = ( + '!', '`', '@', '$', '%', '?', '\\', '==', '++', '--', '::', '*.', + '+>', '->', '<<', '>>', '|#', '##', + ) + + # PIM Modula-2 Additional Reserved Words Dataset + pim_additional_reserved_words = ( + # 3 additional reserved words + 'EXPORT', 'QUALIFIED', 'WITH', + ) + + # PIM Modula-2 Additional Builtins Dataset + pim_additional_builtins = ( + # 16 additional builtins + 'BITSET', 'CAP', 'DEC', 'DISPOSE', 'EXCL', 'FLOAT', 'HALT', 'HIGH', + 'INC', 'INCL', 'NEW', 'NIL', 'PROC', 'SIZE', 'TRUNC', 'VAL', + ) + + # PIM Modula-2 Additional Pseudo-Module Builtins Dataset + pim_additional_pseudo_builtins = ( + # 5 additional pseudo builtins + 'SYSTEM', 'PROCESS', 'TSIZE', 'NEWPROCESS', 'TRANSFER', + ) + +# I S O M o d u l a - 2 D a t a s e t s + + # Lexemes to Mark as Error Tokens for ISO Modula-2 + iso_lexemes_to_reject = ( + '`', '$', '%', '?', '\\', '==', '++', '--', '::', '*.', '+>', '->', + '<<', '>>', '|#', '##', + ) + + # ISO Modula-2 Additional Reserved Words Dataset + iso_additional_reserved_words = ( + # 9 additional reserved words (ISO 10514-1) + 'EXCEPT', 'EXPORT', 'FINALLY', 'FORWARD', 'PACKEDSET', 'QUALIFIED', + 'REM', 'RETRY', 'WITH', + # 10 additional reserved words (ISO 10514-2 & ISO 10514-3) + 'ABSTRACT', 'AS', 'CLASS', 'GUARD', 'INHERIT', 'OVERRIDE', 'READONLY', + 'REVEAL', 'TRACED', 'UNSAFEGUARDED', + ) + + # ISO Modula-2 Additional Builtins Dataset + iso_additional_builtins = ( + # 26 additional builtins (ISO 10514-1) + 'BITSET', 'CAP', 'CMPLX', 'COMPLEX', 'DEC', 'DISPOSE', 'EXCL', 'FLOAT', + 'HALT', 'HIGH', 'IM', 'INC', 'INCL', 'INT', 'INTERRUPTIBLE', 'LENGTH', + 'LFLOAT', 
'LONGCOMPLEX', 'NEW', 'PROC', 'PROTECTION', 'RE', 'SIZE', + 'TRUNC', 'UNINTERRUPTIBLE', 'VAL', + # 5 additional builtins (ISO 10514-2 & ISO 10514-3) + 'CREATE', 'DESTROY', 'EMPTY', 'ISMEMBER', 'SELF', + ) + + # ISO Modula-2 Additional Pseudo-Module Builtins Dataset + iso_additional_pseudo_builtins = ( + # 14 additional builtins (SYSTEM) + 'SYSTEM', 'BITSPERLOC', 'LOCSPERBYTE', 'LOCSPERWORD', 'LOC', + 'ADDADR', 'SUBADR', 'DIFADR', 'MAKEADR', 'ADR', + 'ROTATE', 'SHIFT', 'CAST', 'TSIZE', + # 13 additional builtins (COROUTINES) + 'COROUTINES', 'ATTACH', 'COROUTINE', 'CURRENT', 'DETACH', 'HANDLER', + 'INTERRUPTSOURCE', 'IOTRANSFER', 'IsATTACHED', 'LISTEN', + 'NEWCOROUTINE', 'PROT', 'TRANSFER', + # 9 additional builtins (EXCEPTIONS) + 'EXCEPTIONS', 'AllocateSource', 'CurrentNumber', 'ExceptionNumber', + 'ExceptionSource', 'GetMessage', 'IsCurrentSource', + 'IsExceptionalExecution', 'RAISE', + # 3 additional builtins (TERMINATION) + 'TERMINATION', 'IsTerminating', 'HasHalted', + # 4 additional builtins (M2EXCEPTION) + 'M2EXCEPTION', 'M2Exceptions', 'M2Exception', 'IsM2Exception', + 'indexException', 'rangeException', 'caseSelectException', + 'invalidLocation', 'functionException', 'wholeValueException', + 'wholeDivException', 'realValueException', 'realDivException', + 'complexValueException', 'complexDivException', 'protException', + 'sysException', 'coException', 'exException', + ) + +# M o d u l a - 2   R 1 0   D a t a s e t s + + # Lexemes to Mark as Error Tokens for Modula-2 R10 + m2r10_lexemes_to_reject = ( + '!', '`', '@', '$', '%', '&', '<>', + ) + + # Modula-2 R10 reserved words in addition to the common set + m2r10_additional_reserved_words = ( + # 12 additional reserved words + 'ALIAS', 'ARGLIST', 'BLUEPRINT', 'COPY', 'GENLIB', 'INDETERMINATE', + 'NEW', 'NONE', 'OPAQUE', 'REFERENTIAL', 'RELEASE', 'RETAIN', + # 2 additional reserved words with symbolic assembly option + 'ASM', 'REG', + ) + + # Modula-2 R10 builtins in addition to the common set + m2r10_additional_builtins = ( + # 26 additional builtins + 'CARDINAL', 'COUNT', 'EMPTY', 'EXISTS', 'INSERT', 'LENGTH', 'LONGCARD', + 'OCTET', 'PTR', 'PRED', 'READ', 'READNEW', 'REMOVE', 'RETRIEVE', 'SORT', + 'STORE', 'SUBSET', 'SUCC', 'TLIMIT', 'TMAX', 'TMIN', 'TRUE', 'TSIZE', + 'UNICHAR', 'WRITE', 'WRITEF', + ) + + # Modula-2 R10 Additional Pseudo-Module Builtins Dataset + m2r10_additional_pseudo_builtins = ( + # 13 additional builtins (TPROPERTIES) + 'TPROPERTIES', 'PROPERTY', 'LITERAL', 'TPROPERTY', 'TLITERAL', + 'TBUILTIN', 'TDYN', 'TREFC', 'TNIL', 'TBASE', 'TPRECISION', + 'TMAXEXP', 'TMINEXP', + # 4 additional builtins (CONVERSION) + 'CONVERSION', 'TSXFSIZE', 'SXF', 'VAL', + # 35 additional builtins (UNSAFE) + 'UNSAFE', 'CAST', 'INTRINSIC', 'AVAIL', 'ADD', 'SUB', 'ADDC', 'SUBC', + 'FETCHADD', 'FETCHSUB', 'SHL', 'SHR', 'ASHR', 'ROTL', 'ROTR', 'ROTLC', + 'ROTRC', 'BWNOT', 'BWAND', 'BWOR', 'BWXOR', 'BWNAND', 'BWNOR', + 'SETBIT', 'TESTBIT', 'LSBIT', 'MSBIT', 'CSBITS', 'BAIL', 'HALT', + 'TODO', 'FFI', 'ADDR', 'VARGLIST', 'VARGC', + # 11 additional builtins (ATOMIC) + 'ATOMIC', 'INTRINSIC', 'AVAIL', 'SWAP', 'CAS', 'INC', 'DEC', 'BWAND', + 'BWNAND', 'BWOR', 'BWXOR', + # 7 additional builtins (COMPILER) + 'COMPILER', 'DEBUG', 'MODNAME', 'PROCNAME', 'LINENUM', 'DEFAULT', + 'HASH', + # 5 additional builtins (ASSEMBLER) + 'ASSEMBLER', 'REGISTER', 'SETREG', 'GETREG', 'CODE', + ) + +# O b j e c t i v e   M o d u l a - 2   D a t a s e t s + + # Lexemes to Mark as Error Tokens for Objective Modula-2 + objm2_lexemes_to_reject = ( + '!', '$', '%', '&', '<>', + ) +
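These per-dialect reject sets feed the Error re-tagging performed in `get_tokens_unprocessed()` further down. A minimal sketch of exercising that behaviour from Python (assuming the `pygments.lexers.modula2.Modula2Lexer` entry point this release ships; the Modula-2 snippet itself is invented for illustration):

```python
from pygments.lexers.modula2 import Modula2Lexer
from pygments.token import Error

# '!' is legal ISO punctuation (case label separator synonym), but it
# is listed in m2r10_lexemes_to_reject, so the R10 dialect flags it.
source = "MODULE Demo; BEGIN x ! 1 END Demo."

lexer = Modula2Lexer(dialect='m2r10')
for _, token, value in lexer.get_tokens_unprocessed(source):
    if token is Error:
        print('rejected lexeme:', repr(value))  # -> rejected lexeme: '!'
```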
+ # Objective Modula-2 Extensions + # reserved words in addition to Modula-2 R10 + objm2_additional_reserved_words = ( + # 16 additional reserved words + 'BYCOPY', 'BYREF', 'CLASS', 'CONTINUE', 'CRITICAL', 'INOUT', 'METHOD', + 'ON', 'OPTIONAL', 'OUT', 'PRIVATE', 'PROTECTED', 'PROTOCOL', 'PUBLIC', + 'SUPER', 'TRY', + ) + + # Objective Modula-2 Extensions + # builtins in addition to Modula-2 R10 + objm2_additional_builtins = ( + # 3 additional builtins + 'OBJECT', 'NO', 'YES', + ) + + # Objective Modula-2 Extensions + # pseudo-module builtins in addition to Modula-2 R10 + objm2_additional_pseudo_builtins = ( + # None + ) + +# A g l e t   M o d u l a - 2   D a t a s e t s + + # Aglet Extensions + # reserved words in addition to ISO Modula-2 + aglet_additional_reserved_words = ( + # None + ) + + # Aglet Extensions + # builtins in addition to ISO Modula-2 + aglet_additional_builtins = ( + # 9 additional builtins + 'BITSET8', 'BITSET16', 'BITSET32', 'CARDINAL8', 'CARDINAL16', + 'CARDINAL32', 'INTEGER8', 'INTEGER16', 'INTEGER32', + ) + + # Aglet Modula-2 Extensions + # pseudo-module builtins in addition to ISO Modula-2 + aglet_additional_pseudo_builtins = ( + # None + ) + +# G N U   M o d u l a - 2   D a t a s e t s + + # GNU Extensions + # reserved words in addition to PIM Modula-2 + gm2_additional_reserved_words = ( + # 10 additional reserved words + 'ASM', '__ATTRIBUTE__', '__BUILTIN__', '__COLUMN__', '__DATE__', + '__FILE__', '__FUNCTION__', '__LINE__', '__MODULE__', 'VOLATILE', + ) + + # GNU Extensions + # builtins in addition to PIM Modula-2 + gm2_additional_builtins = ( + # 21 additional builtins + 'BITSET8', 'BITSET16', 'BITSET32', 'CARDINAL8', 'CARDINAL16', + 'CARDINAL32', 'CARDINAL64', 'COMPLEX32', 'COMPLEX64', 'COMPLEX96', + 'COMPLEX128', 'INTEGER8', 'INTEGER16', 'INTEGER32', 'INTEGER64', + 'REAL8', 'REAL16', 'REAL32', 'REAL96', 'REAL128', 'THROW', + ) + + # GNU Extensions + # pseudo-module builtins in addition to PIM Modula-2 + gm2_additional_pseudo_builtins = ( + # None + ) + +# p 1   M o d u l a - 2   D a t a s e t s + + # p1 Extensions + # reserved words in addition to ISO Modula-2 + p1_additional_reserved_words = ( + # None + ) + + # p1 Extensions + # builtins in addition to ISO Modula-2 + p1_additional_builtins = ( + # None + ) + + # p1 Modula-2 Extensions + # pseudo-module builtins in addition to ISO Modula-2 + p1_additional_pseudo_builtins = ( + # 1 additional builtin + 'BCD', + ) + +# X D S   M o d u l a - 2   D a t a s e t s + + # XDS Extensions + # reserved words in addition to ISO Modula-2 + xds_additional_reserved_words = ( + # 1 additional reserved word + 'SEQ', + ) + + # XDS Extensions + # builtins in addition to ISO Modula-2 + xds_additional_builtins = ( + # 9 additional builtins + 'ASH', 'ASSERT', 'DIFFADR_TYPE', 'ENTIER', 'INDEX', 'LEN', + 'LONGCARD', 'SHORTCARD', 'SHORTINT', + ) + + # XDS Modula-2 Extensions + # pseudo-module builtins in addition to ISO Modula-2 + xds_additional_pseudo_builtins = ( + # 22 additional builtins (SYSTEM) + 'PROCESS', 'NEWPROCESS', 'BOOL8', 'BOOL16', 'BOOL32', 'CARD8', + 'CARD16', 'CARD32', 'INT8', 'INT16', 'INT32', 'REF', 'MOVE', + 'FILL', 'GET', 'PUT', 'CC', 'int', 'unsigned', 'size_t', 'void', + # 3 additional builtins (COMPILER) + 'COMPILER', 'OPTION', 'EQUATION', + ) + +# P I M   S t a n d a r d   L i b r a r y   D a t a s e t s + + # PIM Modula-2 Standard Library Modules Dataset + pim_stdlib_module_identifiers = ( + 'Terminal', 'FileSystem', 'InOut', 'RealInOut', 'MathLib0', 'Storage', + ) + + # PIM Modula-2 Standard Library Types Dataset +
pim_stdlib_type_identifiers = ( + 'Flag', 'FlagSet', 'Response', 'Command', 'Lock', 'Permission', + 'MediumType', 'File', 'FileProc', 'DirectoryProc', 'FileCommand', + 'DirectoryCommand', + ) + + # PIM Modula-2 Standard Library Procedures Dataset + pim_stdlib_proc_identifiers = ( + 'Read', 'BusyRead', 'ReadAgain', 'Write', 'WriteString', 'WriteLn', + 'Create', 'Lookup', 'Close', 'Delete', 'Rename', 'SetRead', 'SetWrite', + 'SetModify', 'SetOpen', 'Doio', 'SetPos', 'GetPos', 'Length', 'Reset', + 'Again', 'ReadWord', 'WriteWord', 'ReadChar', 'WriteChar', + 'CreateMedium', 'DeleteMedium', 'AssignName', 'DeassignName', + 'ReadMedium', 'LookupMedium', 'OpenInput', 'OpenOutput', 'CloseInput', + 'CloseOutput', 'ReadString', 'ReadInt', 'ReadCard', 'ReadWrd', + 'WriteInt', 'WriteCard', 'WriteOct', 'WriteHex', 'WriteWrd', + 'ReadReal', 'WriteReal', 'WriteFixPt', 'WriteRealOct', 'sqrt', 'exp', + 'ln', 'sin', 'cos', 'arctan', 'entier', 'ALLOCATE', 'DEALLOCATE', + ) + + # PIM Modula-2 Standard Library Variables Dataset + pim_stdlib_var_identifiers = ( + 'Done', 'termCH', 'in', 'out' + ) + + # PIM Modula-2 Standard Library Constants Dataset + pim_stdlib_const_identifiers = ( + 'EOL', + ) + +# I S O S t a n d a r d L i b r a r y D a t a s e t s + + # ISO Modula-2 Standard Library Modules Dataset + iso_stdlib_module_identifiers = ( + # TO DO + ) + + # ISO Modula-2 Standard Library Types Dataset + iso_stdlib_type_identifiers = ( + # TO DO + ) + + # ISO Modula-2 Standard Library Procedures Dataset + iso_stdlib_proc_identifiers = ( + # TO DO + ) + + # ISO Modula-2 Standard Library Variables Dataset + iso_stdlib_var_identifiers = ( + # TO DO + ) + + # ISO Modula-2 Standard Library Constants Dataset + iso_stdlib_const_identifiers = ( + # TO DO + ) + +# M 2 R 1 0 S t a n d a r d L i b r a r y D a t a s e t s + + # Modula-2 R10 Standard Library ADTs Dataset + m2r10_stdlib_adt_identifiers = ( + 'BCD', 'LONGBCD', 'BITSET', 'SHORTBITSET', 'LONGBITSET', + 'LONGLONGBITSET', 'COMPLEX', 'LONGCOMPLEX', 'SHORTCARD', 'LONGLONGCARD', + 'SHORTINT', 'LONGLONGINT', 'POSINT', 'SHORTPOSINT', 'LONGPOSINT', + 'LONGLONGPOSINT', 'BITSET8', 'BITSET16', 'BITSET32', 'BITSET64', + 'BITSET128', 'BS8', 'BS16', 'BS32', 'BS64', 'BS128', 'CARDINAL8', + 'CARDINAL16', 'CARDINAL32', 'CARDINAL64', 'CARDINAL128', 'CARD8', + 'CARD16', 'CARD32', 'CARD64', 'CARD128', 'INTEGER8', 'INTEGER16', + 'INTEGER32', 'INTEGER64', 'INTEGER128', 'INT8', 'INT16', 'INT32', + 'INT64', 'INT128', 'STRING', 'UNISTRING', + ) + + # Modula-2 R10 Standard Library Blueprints Dataset + m2r10_stdlib_blueprint_identifiers = ( + 'ProtoRoot', 'ProtoComputational', 'ProtoNumeric', 'ProtoScalar', + 'ProtoNonScalar', 'ProtoCardinal', 'ProtoInteger', 'ProtoReal', + 'ProtoComplex', 'ProtoVector', 'ProtoTuple', 'ProtoCompArray', + 'ProtoCollection', 'ProtoStaticArray', 'ProtoStaticSet', + 'ProtoStaticString', 'ProtoArray', 'ProtoString', 'ProtoSet', + 'ProtoMultiSet', 'ProtoDictionary', 'ProtoMultiDict', 'ProtoExtension', + 'ProtoIO', 'ProtoCardMath', 'ProtoIntMath', 'ProtoRealMath', + ) + + # Modula-2 R10 Standard Library Modules Dataset + m2r10_stdlib_module_identifiers = ( + 'ASCII', 'BooleanIO', 'CharIO', 'UnicharIO', 'OctetIO', + 'CardinalIO', 'LongCardIO', 'IntegerIO', 'LongIntIO', 'RealIO', + 'LongRealIO', 'BCDIO', 'LongBCDIO', 'CardMath', 'LongCardMath', + 'IntMath', 'LongIntMath', 'RealMath', 'LongRealMath', 'BCDMath', + 'LongBCDMath', 'FileIO', 'FileSystem', 'Storage', 'IOSupport', + ) + + # Modula-2 R10 Standard Library Types Dataset + m2r10_stdlib_type_identifiers = ( + 
'File', 'Status', + # TO BE COMPLETED + ) + + # Modula-2 R10 Standard Library Procedures Dataset + m2r10_stdlib_proc_identifiers = ( + 'ALLOCATE', 'DEALLOCATE', 'SIZE', + # TO BE COMPLETED + ) + + # Modula-2 R10 Standard Library Variables Dataset + m2r10_stdlib_var_identifiers = ( + 'stdIn', 'stdOut', 'stdErr', + ) + + # Modula-2 R10 Standard Library Constants Dataset + m2r10_stdlib_const_identifiers = ( + 'pi', 'tau', + ) + +# D i a l e c t s + + # Dialect modes + dialects = ( + 'unknown', + 'm2pim', 'm2iso', 'm2r10', 'objm2', + 'm2iso+aglet', 'm2pim+gm2', 'm2iso+p1', 'm2iso+xds', + ) + +# D a t a b a s e s + + # Lexemes to Mark as Errors Database + lexemes_to_reject_db = { + # Lexemes to reject for unknown dialect + 'unknown': ( + # LEAVE THIS EMPTY + ), + # Lexemes to reject for PIM Modula-2 + 'm2pim': ( + pim_lexemes_to_reject, + ), + # Lexemes to reject for ISO Modula-2 + 'm2iso': ( + iso_lexemes_to_reject, + ), + # Lexemes to reject for Modula-2 R10 + 'm2r10': ( + m2r10_lexemes_to_reject, + ), + # Lexemes to reject for Objective Modula-2 + 'objm2': ( + objm2_lexemes_to_reject, + ), + # Lexemes to reject for Aglet Modula-2 + 'm2iso+aglet': ( + iso_lexemes_to_reject, + ), + # Lexemes to reject for GNU Modula-2 + 'm2pim+gm2': ( + pim_lexemes_to_reject, + ), + # Lexemes to reject for p1 Modula-2 + 'm2iso+p1': ( + iso_lexemes_to_reject, + ), + # Lexemes to reject for XDS Modula-2 + 'm2iso+xds': ( + iso_lexemes_to_reject, + ), + } + + # Reserved Words Database + reserved_words_db = { + # Reserved words for unknown dialect + 'unknown': ( + common_reserved_words, + pim_additional_reserved_words, + iso_additional_reserved_words, + m2r10_additional_reserved_words, + ), + + # Reserved words for PIM Modula-2 + 'm2pim': ( + common_reserved_words, + pim_additional_reserved_words, + ), + + # Reserved words for ISO Modula-2 + 'm2iso': ( + common_reserved_words, + iso_additional_reserved_words, + ), + + # Reserved words for Modula-2 R10 + 'm2r10': ( + common_reserved_words, + m2r10_additional_reserved_words, + ), + + # Reserved words for Objective Modula-2 + 'objm2': ( + common_reserved_words, + m2r10_additional_reserved_words, + objm2_additional_reserved_words, + ), + + # Reserved words for Aglet Modula-2 Extensions + 'm2iso+aglet': ( + common_reserved_words, + iso_additional_reserved_words, + aglet_additional_reserved_words, + ), + + # Reserved words for GNU Modula-2 Extensions + 'm2pim+gm2': ( + common_reserved_words, + pim_additional_reserved_words, + gm2_additional_reserved_words, + ), + + # Reserved words for p1 Modula-2 Extensions + 'm2iso+p1': ( + common_reserved_words, + iso_additional_reserved_words, + p1_additional_reserved_words, + ), + + # Reserved words for XDS Modula-2 Extensions + 'm2iso+xds': ( + common_reserved_words, + iso_additional_reserved_words, + xds_additional_reserved_words, + ), + } + + # Builtins Database + builtins_db = { + # Builtins for unknown dialect + 'unknown': ( + common_builtins, + pim_additional_builtins, + iso_additional_builtins, + m2r10_additional_builtins, + ), + + # Builtins for PIM Modula-2 + 'm2pim': ( + common_builtins, + pim_additional_builtins, + ), + + # Builtins for ISO Modula-2 + 'm2iso': ( + common_builtins, + iso_additional_builtins, + ), + + # Builtins for Modula-2 R10 + 'm2r10': ( + common_builtins, + m2r10_additional_builtins, + ), + + # Builtins for Objective Modula-2 + 'objm2': ( + common_builtins, + m2r10_additional_builtins, + objm2_additional_builtins, + ), + + # Builtins for Aglet Modula-2 Extensions + 'm2iso+aglet': ( + common_builtins, +
iso_additional_builtins, + aglet_additional_builtins, + ), + + # Builtins for GNU Modula-2 Extensions + 'm2pim+gm2': ( + common_builtins, + pim_additional_builtins, + gm2_additional_builtins, + ), + + # Builtins for p1 Modula-2 Extensions + 'm2iso+p1': ( + common_builtins, + iso_additional_builtins, + p1_additional_builtins, + ), + + # Builtins for XDS Modula-2 Extensions + 'm2iso+xds': ( + common_builtins, + iso_additional_builtins, + xds_additional_builtins, + ), + } + + # Pseudo-Module Builtins Database + pseudo_builtins_db = { + # Builtins for unknown dialect + 'unknown': ( + common_pseudo_builtins, + pim_additional_pseudo_builtins, + iso_additional_pseudo_builtins, + m2r10_additional_pseudo_builtins, + ), + + # Builtins for PIM Modula-2 + 'm2pim': ( + common_pseudo_builtins, + pim_additional_pseudo_builtins, + ), + + # Builtins for ISO Modula-2 + 'm2iso': ( + common_pseudo_builtins, + iso_additional_pseudo_builtins, + ), + + # Builtins for Modula-2 R10 + 'm2r10': ( + common_pseudo_builtins, + m2r10_additional_pseudo_builtins, + ), + + # Builtins for Objective Modula-2 + 'objm2': ( + common_pseudo_builtins, + m2r10_additional_pseudo_builtins, + objm2_additional_pseudo_builtins, + ), + + # Builtins for Aglet Modula-2 Extensions + 'm2iso+aglet': ( + common_pseudo_builtins, + iso_additional_pseudo_builtins, + aglet_additional_pseudo_builtins, + ), + + # Builtins for GNU Modula-2 Extensions + 'm2pim+gm2': ( + common_pseudo_builtins, + pim_additional_pseudo_builtins, + gm2_additional_pseudo_builtins, + ), + + # Builtins for p1 Modula-2 Extensions + 'm2iso+p1': ( + common_pseudo_builtins, + iso_additional_pseudo_builtins, + p1_additional_pseudo_builtins, + ), + + # Builtins for XDS Modula-2 Extensions + 'm2iso+xds': ( + common_pseudo_builtins, + iso_additional_pseudo_builtins, + xds_additional_pseudo_builtins, + ), + } + + # Standard Library ADTs Database + stdlib_adts_db = { + # Empty entry for unknown dialect + 'unknown': ( + # LEAVE THIS EMPTY + ), + # Standard Library ADTs for PIM Modula-2 + 'm2pim': ( + # No first class library types + ), + + # Standard Library ADTs for ISO Modula-2 + 'm2iso': ( + # No first class library types + ), + + # Standard Library ADTs for Modula-2 R10 + 'm2r10': ( + m2r10_stdlib_adt_identifiers, + ), + + # Standard Library ADTs for Objective Modula-2 + 'objm2': ( + m2r10_stdlib_adt_identifiers, + ), + + # Standard Library ADTs for Aglet Modula-2 + 'm2iso+aglet': ( + # No first class library types + ), + + # Standard Library ADTs for GNU Modula-2 + 'm2pim+gm2': ( + # No first class library types + ), + + # Standard Library ADTs for p1 Modula-2 + 'm2iso+p1': ( + # No first class library types + ), + + # Standard Library ADTs for XDS Modula-2 + 'm2iso+xds': ( + # No first class library types + ), + } + + # Standard Library Modules Database + stdlib_modules_db = { + # Empty entry for unknown dialect + 'unknown': ( + # LEAVE THIS EMPTY + ), + # Standard Library Modules for PIM Modula-2 + 'm2pim': ( + pim_stdlib_module_identifiers, + ), + + # Standard Library Modules for ISO Modula-2 + 'm2iso': ( + iso_stdlib_module_identifiers, + ), + + # Standard Library Modules for Modula-2 R10 + 'm2r10': ( + m2r10_stdlib_blueprint_identifiers, + m2r10_stdlib_module_identifiers, + m2r10_stdlib_adt_identifiers, + ), + + # Standard Library Modules for Objective Modula-2 + 'objm2': ( + m2r10_stdlib_blueprint_identifiers, + m2r10_stdlib_module_identifiers, + ), + + # Standard Library Modules for Aglet Modula-2 + 'm2iso+aglet': ( + iso_stdlib_module_identifiers, + ), + + # Standard
Library Modules for GNU Modula-2 + 'm2pim+gm2': ( + pim_stdlib_module_identifiers, + ), + + # Standard Library Modules for p1 Modula-2 + 'm2iso+p1': ( + iso_stdlib_module_identifiers, + ), + + # Standard Library Modules for XDS Modula-2 + 'm2iso+xds': ( + iso_stdlib_module_identifiers, + ), + } + + # Standard Library Types Database + stdlib_types_db = { + # Empty entry for unknown dialect + 'unknown': ( + # LEAVE THIS EMPTY + ), + # Standard Library Types for PIM Modula-2 + 'm2pim': ( + pim_stdlib_type_identifiers, + ), + + # Standard Library Types for ISO Modula-2 + 'm2iso': ( + iso_stdlib_type_identifiers, + ), + + # Standard Library Types for Modula-2 R10 + 'm2r10': ( + m2r10_stdlib_type_identifiers, + ), + + # Standard Library Types for Objective Modula-2 + 'objm2': ( + m2r10_stdlib_type_identifiers, + ), + + # Standard Library Types for Aglet Modula-2 + 'm2iso+aglet': ( + iso_stdlib_type_identifiers, + ), + + # Standard Library Types for GNU Modula-2 + 'm2pim+gm2': ( + pim_stdlib_type_identifiers, + ), + + # Standard Library Types for p1 Modula-2 + 'm2iso+p1': ( + iso_stdlib_type_identifiers, + ), + + # Standard Library Types for XDS Modula-2 + 'm2iso+xds': ( + iso_stdlib_type_identifiers, + ), + } + + # Standard Library Procedures Database + stdlib_procedures_db = { + # Empty entry for unknown dialect + 'unknown': ( + # LEAVE THIS EMPTY + ), + # Standard Library Procedures for PIM Modula-2 + 'm2pim': ( + pim_stdlib_proc_identifiers, + ), + + # Standard Library Procedures for ISO Modula-2 + 'm2iso': ( + iso_stdlib_proc_identifiers, + ), + + # Standard Library Procedures for Modula-2 R10 + 'm2r10': ( + m2r10_stdlib_proc_identifiers, + ), + + # Standard Library Procedures for Objective Modula-2 + 'objm2': ( + m2r10_stdlib_proc_identifiers, + ), + + # Standard Library Procedures for Aglet Modula-2 + 'm2iso+aglet': ( + iso_stdlib_proc_identifiers, + ), + + # Standard Library Procedures for GNU Modula-2 + 'm2pim+gm2': ( + pim_stdlib_proc_identifiers, + ), + + # Standard Library Procedures for p1 Modula-2 + 'm2iso+p1': ( + iso_stdlib_proc_identifiers, + ), + + # Standard Library Procedures for XDS Modula-2 + 'm2iso+xds': ( + iso_stdlib_proc_identifiers, + ), + } + + # Standard Library Variables Database + stdlib_variables_db = { + # Empty entry for unknown dialect + 'unknown': ( + # LEAVE THIS EMPTY + ), + # Standard Library Variables for PIM Modula-2 + 'm2pim': ( + pim_stdlib_var_identifiers, + ), + + # Standard Library Variables for ISO Modula-2 + 'm2iso': ( + iso_stdlib_var_identifiers, + ), + + # Standard Library Variables for Modula-2 R10 + 'm2r10': ( + m2r10_stdlib_var_identifiers, + ), + + # Standard Library Variables for Objective Modula-2 + 'objm2': ( + m2r10_stdlib_var_identifiers, + ), + + # Standard Library Variables for Aglet Modula-2 + 'm2iso+aglet': ( + iso_stdlib_var_identifiers, + ), + + # Standard Library Variables for GNU Modula-2 + 'm2pim+gm2': ( + pim_stdlib_var_identifiers, + ), + + # Standard Library Variables for p1 Modula-2 + 'm2iso+p1': ( + iso_stdlib_var_identifiers, + ), + + # Standard Library Variables for XDS Modula-2 + 'm2iso+xds': ( + iso_stdlib_var_identifiers, + ), + } + + # Standard Library Constants Database + stdlib_constants_db = { + # Empty entry for unknown dialect + 'unknown': ( + # LEAVE THIS EMPTY + ), + # Standard Library Constants for PIM Modula-2 + 'm2pim': ( + pim_stdlib_const_identifiers, + ), + + # Standard Library Constants for ISO Modula-2 + 'm2iso': ( + iso_stdlib_const_identifiers, + ), + + # Standard Library Constants for Modula-2 R10 + 
'm2r10': ( + m2r10_stdlib_const_identifiers, + ), + + # Standard Library Constants for Objective Modula-2 + 'objm2': ( + m2r10_stdlib_const_identifiers, + ), + + # Standard Library Constants for Aglet Modula-2 + 'm2iso+aglet': ( + iso_stdlib_const_identifiers, + ), + + # Standard Library Constants for GNU Modula-2 + 'm2pim+gm2': ( + pim_stdlib_const_identifiers, + ), + + # Standard Library Constants for p1 Modula-2 + 'm2iso+p1': ( + iso_stdlib_const_identifiers, + ), + + # Standard Library Constants for XDS Modula-2 + 'm2iso+xds': ( + iso_stdlib_const_identifiers, + ), + } + +# M e t h o d s + + # initialise a lexer instance + def __init__(self, **options): + # + # check dialect options + # + dialects = get_list_opt(options, 'dialect', []) + # + for dialect_option in dialects: + if dialect_option in self.dialects[1:]: + # valid dialect option found + self.set_dialect(dialect_option) + break + # + # Fallback Mode (DEFAULT) + else: + # no valid dialect option + self.set_dialect('unknown') + # + self.dialect_set_by_tag = False + # + # check style options + # + styles = get_list_opt(options, 'style', []) + # + # use lowercase mode for Algol style + if 'algol' in styles or 'algol_nu' in styles: + self.algol_publication_mode = True + else: + self.algol_publication_mode = False + # + # Check option flags + # + self.treat_stdlib_adts_as_builtins = get_bool_opt( + options, 'treat_stdlib_adts_as_builtins', True) + # + # call superclass initialiser + RegexLexer.__init__(self, **options) + + # Set lexer to a specified dialect + def set_dialect(self, dialect_id): + # + # if __debug__: + # print 'entered set_dialect with arg: ', dialect_id + # + # check dialect name against known dialects + if dialect_id not in self.dialects: + dialect = 'unknown' # default + else: + dialect = dialect_id + # + # compose lexemes to reject set + lexemes_to_reject_set = set() + # add each list of reject lexemes for this dialect + for list in self.lexemes_to_reject_db[dialect]: + lexemes_to_reject_set.update(set(list)) + # + # compose reserved words set + reswords_set = set() + # add each list of reserved words for this dialect + for list in self.reserved_words_db[dialect]: + reswords_set.update(set(list)) + # + # compose builtins set + builtins_set = set() + # add each list of builtins for this dialect excluding reserved words + for list in self.builtins_db[dialect]: + builtins_set.update(set(list).difference(reswords_set)) + # + # compose pseudo-builtins set + pseudo_builtins_set = set() + # add each list of builtins for this dialect excluding reserved words + for list in self.pseudo_builtins_db[dialect]: + pseudo_builtins_set.update(set(list).difference(reswords_set)) + # + # compose ADTs set + adts_set = set() + # add each list of ADTs for this dialect excluding reserved words + for list in self.stdlib_adts_db[dialect]: + adts_set.update(set(list).difference(reswords_set)) + # + # compose modules set + modules_set = set() + # add each list of modules for this dialect excluding builtins + for list in self.stdlib_modules_db[dialect]: + modules_set.update(set(list).difference(builtins_set)) + # + # compose types set + types_set = set() + # add each list of types for this dialect excluding builtins + for list in self.stdlib_types_db[dialect]: + types_set.update(set(list).difference(builtins_set)) + # + # compose procedures set + procedures_set = set() + # add each list of procedures for this dialect excluding builtins + for list in self.stdlib_procedures_db[dialect]: +
procedures_set.update(set(list).difference(builtins_set)) + # + # compose variables set + variables_set = set() + # add each list of variables for this dialect excluding builtins + for list in self.stdlib_variables_db[dialect]: + variables_set.update(set(list).difference(builtins_set)) + # + # compose constants set + constants_set = set() + # add each list of constants for this dialect excluding builtins + for list in self.stdlib_constants_db[dialect]: + constants_set.update(set(list).difference(builtins_set)) + # + # update lexer state + self.dialect = dialect + self.lexemes_to_reject = lexemes_to_reject_set + self.reserved_words = reswords_set + self.builtins = builtins_set + self.pseudo_builtins = pseudo_builtins_set + self.adts = adts_set + self.modules = modules_set + self.types = types_set + self.procedures = procedures_set + self.variables = variables_set + self.constants = constants_set + # + # if __debug__: + # print 'exiting set_dialect' + # print ' self.dialect: ', self.dialect + # print ' self.lexemes_to_reject: ', self.lexemes_to_reject + # print ' self.reserved_words: ', self.reserved_words + # print ' self.builtins: ', self.builtins + # print ' self.pseudo_builtins: ', self.pseudo_builtins + # print ' self.adts: ', self.adts + # print ' self.modules: ', self.modules + # print ' self.types: ', self.types + # print ' self.procedures: ', self.procedures + # print ' self.variables: ', self.variables + # print ' self.types: ', self.types + # print ' self.constants: ', self.constants + + # Extracts a dialect name from a dialect tag comment string and checks + # the extracted name against known dialects. If a match is found, the + # matching name is returned, otherwise dialect id 'unknown' is returned + def get_dialect_from_dialect_tag(self, dialect_tag): + # + # if __debug__: + # print 'entered get_dialect_from_dialect_tag with arg: ', dialect_tag + # + # constants + left_tag_delim = '(*!' 
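+        # e.g. in the tag (*!m2r10*) the indicator 'm2r10' sits between
+        # these delimiters; it is sliced out below as dialect_tag[3:-2]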
+ right_tag_delim = '*)' + left_tag_delim_len = len(left_tag_delim) + right_tag_delim_len = len(right_tag_delim) + indicator_start = left_tag_delim_len + indicator_end = -(right_tag_delim_len) + # + # check comment string for dialect indicator + if len(dialect_tag) > (left_tag_delim_len + right_tag_delim_len) \ + and dialect_tag.startswith(left_tag_delim) \ + and dialect_tag.endswith(right_tag_delim): + # + # if __debug__: + # print 'dialect tag found' + # + # extract dialect indicator + indicator = dialect_tag[indicator_start:indicator_end] + # + # if __debug__: + # print 'extracted: ', indicator + # + # check against known dialects + for index in range(1, len(self.dialects)): + # + # if __debug__: + # print 'dialects[', index, ']: ', self.dialects[index] + # + if indicator == self.dialects[index]: + # + # if __debug__: + # print 'matching dialect found' + # + # indicator matches known dialect + return indicator + else: + # indicator does not match any dialect + return 'unknown' # default + else: + # invalid indicator string + return 'unknown' # default + + # intercept the token stream, modify token attributes and return them + def get_tokens_unprocessed(self, text): + for index, token, value in RegexLexer.get_tokens_unprocessed(self, text): + # + # check for dialect tag if dialect has not been set by tag + if not self.dialect_set_by_tag and token == Comment.Special: + indicated_dialect = self.get_dialect_from_dialect_tag(value) + if indicated_dialect != 'unknown': + # token is a dialect indicator + # reset reserved words and builtins + self.set_dialect(indicated_dialect) + self.dialect_set_by_tag = True + # + # check for reserved words, predefined and stdlib identifiers + if token is Name: + if value in self.reserved_words: + token = Keyword.Reserved + if self.algol_publication_mode: + value = value.lower() + # + elif value in self.builtins: + token = Name.Builtin + if self.algol_publication_mode: + value = value.lower() + # + elif value in self.pseudo_builtins: + token = Name.Builtin.Pseudo + if self.algol_publication_mode: + value = value.lower() + # + elif value in self.adts: + if not self.treat_stdlib_adts_as_builtins: + token = Name.Namespace + else: + token = Name.Builtin.Pseudo + if self.algol_publication_mode: + value = value.lower() + # + elif value in self.modules: + token = Name.Namespace + # + elif value in self.types: + token = Name.Class + # + elif value in self.procedures: + token = Name.Function + # + elif value in self.variables: + token = Name.Variable + # + elif value in self.constants: + token = Name.Constant + # + elif token in Number: + # + # mark prefix number literals as error for PIM and ISO dialects + if self.dialect not in ('unknown', 'm2r10', 'objm2'): + if "'" in value or value[0:2] in ('0b', '0x', '0u'): + token = Error + # + elif self.dialect in ('m2r10', 'objm2'): + # mark base-8 number literals as errors for M2 R10 and ObjM2 + if token is Number.Oct: + token = Error + # mark suffix base-16 literals as errors for M2 R10 and ObjM2 + elif token is Number.Hex and 'H' in value: + token = Error + # mark real numbers with E as errors for M2 R10 and ObjM2 + elif token is Number.Float and 'E' in value: + token = Error + # + elif token in Comment: + # + # mark single line comment as error for PIM and ISO dialects + if token is Comment.Single: + if self.dialect not in ('unknown', 'm2r10', 'objm2'): + token = Error + # + if token is Comment.Preproc: + # mark ISO pragma as error for PIM dialects + if value.startswith('<*') and \ + self.dialect.startswith('m2pim'): + 
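+                        # (<* ... *> pragmas are ISO 10514 syntax and
+                        # have no meaning in PIM, hence the Error token)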
token = Error + # mark PIM pragma as comment for other dialects + elif value.startswith('(*$') and \ + self.dialect != 'unknown' and \ + not self.dialect.startswith('m2pim'): + token = Comment.Multiline + # + else: # token is neither Name nor Comment + # + # mark lexemes matching the dialect's error token set as errors + if value in self.lexemes_to_reject: + token = Error + # + # substitute lexemes when in Algol mode + if self.algol_publication_mode: + if value == '#': + value = '≠' + elif value == '<=': + value = '≤' + elif value == '>=': + value = '≥' + elif value == '==': + value = '≡' + elif value == '*.': + value = '•' + + # return result + yield index, token, value diff --git a/pygments/lexers/monte.py b/pygments/lexers/monte.py old mode 100644 new mode 100755 index e181c94..ee35637 --- a/pygments/lexers/monte.py +++ b/pygments/lexers/monte.py @@ -1,204 +1,204 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.monte - ~~~~~~~~~~~~~~~~~~~~~ - - Lexer for the Monte programming language. - - :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -from pygments.token import Comment, Error, Keyword, Name, Number, Operator, \ - Punctuation, String, Whitespace -from pygments.lexer import RegexLexer, include, words - -__all__ = ['MonteLexer'] - - -# `var` handled separately -# `interface` handled separately -_declarations = ['bind', 'def', 'fn', 'object'] -_methods = ['method', 'to'] -_keywords = [ - 'as', 'break', 'catch', 'continue', 'else', 'escape', 'exit', 'exports', - 'extends', 'finally', 'for', 'guards', 'if', 'implements', 'import', - 'in', 'match', 'meta', 'pass', 'return', 'switch', 'try', 'via', 'when', - 'while', -] -_operators = [ - # Unary - '~', '!', - # Binary - '+', '-', '*', '/', '%', '**', '&', '|', '^', '<<', '>>', - # Binary augmented - '+=', '-=', '*=', '/=', '%=', '**=', '&=', '|=', '^=', '<<=', '>>=', - # Comparison - '==', '!=', '<', '<=', '>', '>=', '<=>', - # Patterns and assignment - ':=', '?', '=~', '!~', '=>', - # Calls and sends - '.', '<-', '->', -] -_escape_pattern = ( - r'(?:\\x[0-9a-fA-F]{2}|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|' - r'\\["\'\\bftnr])') -# _char = _escape_chars + [('.', String.Char)] -_identifier = r'[_a-zA-Z]\w*' - -_constants = [ - # Void constants - 'null', - # Bool constants - 'false', 'true', - # Double constants - 'Infinity', 'NaN', - # Special objects - 'M', 'Ref', 'throw', 'traceln', -] - -_guards = [ - 'Any', 'Binding', 'Bool', 'Bytes', 'Char', 'DeepFrozen', 'Double', - 'Empty', 'Int', 'List', 'Map', 'Near', 'NullOk', 'Same', 'Selfless', - 'Set', 'Str', 'SubrangeGuard', 'Transparent', 'Void', -] - -_safeScope = [ - '_accumulateList', '_accumulateMap', '_auditedBy', '_bind', - '_booleanFlow', '_comparer', '_equalizer', '_iterForever', '_loop', - '_makeBytes', '_makeDouble', '_makeFinalSlot', '_makeInt', '_makeList', - '_makeMap', '_makeMessageDesc', '_makeOrderedSpace', '_makeParamDesc', - '_makeProtocolDesc', '_makeSourceSpan', '_makeString', '_makeVarSlot', - '_makeVerbFacet', '_mapExtract', '_matchSame', '_quasiMatcher', - '_slotToBinding', '_splitList', '_suchThat', '_switchFailed', - '_validateFor', 'b__quasiParser', 'eval', 'import', 'm__quasiParser', - 'makeBrandPair', 'makeLazySlot', 'safeScope', 'simple__quasiParser', -] - - -class MonteLexer(RegexLexer): - """ - Lexer for the `Monte `_ programming language. - - .. 
versionadded:: 2.2 - """ - name = 'Monte' - aliases = ['monte'] - filenames = ['*.mt'] - - tokens = { - 'root': [ - # Comments - (r'#[^\n]*\n', Comment), - - # Docstrings - # Apologies for the non-greedy matcher here. - (r'/\*\*.*?\*/', String.Doc), - - # `var` declarations - (r'\bvar\b', Keyword.Declaration, 'var'), - - # `interface` declarations - (r'\binterface\b', Keyword.Declaration, 'interface'), - - # method declarations - (words(_methods, prefix='\\b', suffix='\\b'), - Keyword, 'method'), - - # All other declarations - (words(_declarations, prefix='\\b', suffix='\\b'), - Keyword.Declaration), - - # Keywords - (words(_keywords, prefix='\\b', suffix='\\b'), Keyword), - - # Literals - ('[+-]?0x[_0-9a-fA-F]+', Number.Hex), - (r'[+-]?[_0-9]+\.[_0-9]*([eE][+-]?[_0-9]+)?', Number.Float), - ('[+-]?[_0-9]+', Number.Integer), - ("'", String.Double, 'char'), - ('"', String.Double, 'string'), - - # Quasiliterals - ('`', String.Backtick, 'ql'), - - # Operators - (words(_operators), Operator), - - # Verb operators - (_identifier + '=', Operator.Word), - - # Safe scope constants - (words(_constants, prefix='\\b', suffix='\\b'), - Keyword.Pseudo), - - # Safe scope guards - (words(_guards, prefix='\\b', suffix='\\b'), Keyword.Type), - - # All other safe scope names - (words(_safeScope, prefix='\\b', suffix='\\b'), - Name.Builtin), - - # Identifiers - (_identifier, Name), - - # Punctuation - (r'\(|\)|\{|\}|\[|\]|:|,', Punctuation), - - # Whitespace - (' +', Whitespace), - - # Definite lexer errors - ('=', Error), - ], - 'char': [ - # It is definitely an error to have a char of width == 0. - ("'", Error, 'root'), - (_escape_pattern, String.Escape, 'charEnd'), - ('.', String.Char, 'charEnd'), - ], - 'charEnd': [ - ("'", String.Char, '#pop:2'), - # It is definitely an error to have a char of width > 1. - ('.', Error), - ], - # The state of things coming into an interface. - 'interface': [ - (' +', Whitespace), - (_identifier, Name.Class, '#pop'), - include('root'), - ], - # The state of things coming into a method. - 'method': [ - (' +', Whitespace), - (_identifier, Name.Function, '#pop'), - include('root'), - ], - 'string': [ - ('"', String.Double, 'root'), - (_escape_pattern, String.Escape), - (r'\n', String.Double), - ('.', String.Double), - ], - 'ql': [ - ('`', String.Backtick, 'root'), - (r'\$' + _escape_pattern, String.Escape), - (r'\$\$', String.Escape), - (r'@@', String.Escape), - (r'\$\{', String.Interpol, 'qlNest'), - (r'@\{', String.Interpol, 'qlNest'), - (r'\$' + _identifier, Name), - ('@' + _identifier, Name), - ('.', String.Backtick), - ], - 'qlNest': [ - (r'\}', String.Interpol, '#pop'), - include('root'), - ], - # The state of things immediately following `var`. - 'var': [ - (' +', Whitespace), - (_identifier, Name.Variable, '#pop'), - include('root'), - ], - } +# -*- coding: utf-8 -*- +""" + pygments.lexers.monte + ~~~~~~~~~~~~~~~~~~~~~ + + Lexer for the Monte programming language. + + :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +from pygments.token import Comment, Error, Keyword, Name, Number, Operator, \ + Punctuation, String, Whitespace +from pygments.lexer import RegexLexer, include, words + +__all__ = ['MonteLexer'] + + +# `var` handled separately +# `interface` handled separately +_declarations = ['bind', 'def', 'fn', 'object'] +_methods = ['method', 'to'] +_keywords = [ + 'as', 'break', 'catch', 'continue', 'else', 'escape', 'exit', 'exports', + 'extends', 'finally', 'for', 'guards', 'if', 'implements', 'import', + 'in', 'match', 'meta', 'pass', 'return', 'switch', 'try', 'via', 'when', + 'while', +] +_operators = [ + # Unary + '~', '!', + # Binary + '+', '-', '*', '/', '%', '**', '&', '|', '^', '<<', '>>', + # Binary augmented + '+=', '-=', '*=', '/=', '%=', '**=', '&=', '|=', '^=', '<<=', '>>=', + # Comparison + '==', '!=', '<', '<=', '>', '>=', '<=>', + # Patterns and assignment + ':=', '?', '=~', '!~', '=>', + # Calls and sends + '.', '<-', '->', +] +_escape_pattern = ( + r'(?:\\x[0-9a-fA-F]{2}|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|' + r'\\["\'\\bftnr])') +# _char = _escape_chars + [('.', String.Char)] +_identifier = r'[_a-zA-Z]\w*' + +_constants = [ + # Void constants + 'null', + # Bool constants + 'false', 'true', + # Double constants + 'Infinity', 'NaN', + # Special objects + 'M', 'Ref', 'throw', 'traceln', +] + +_guards = [ + 'Any', 'Binding', 'Bool', 'Bytes', 'Char', 'DeepFrozen', 'Double', + 'Empty', 'Int', 'List', 'Map', 'Near', 'NullOk', 'Same', 'Selfless', + 'Set', 'Str', 'SubrangeGuard', 'Transparent', 'Void', +] + +_safeScope = [ + '_accumulateList', '_accumulateMap', '_auditedBy', '_bind', + '_booleanFlow', '_comparer', '_equalizer', '_iterForever', '_loop', + '_makeBytes', '_makeDouble', '_makeFinalSlot', '_makeInt', '_makeList', + '_makeMap', '_makeMessageDesc', '_makeOrderedSpace', '_makeParamDesc', + '_makeProtocolDesc', '_makeSourceSpan', '_makeString', '_makeVarSlot', + '_makeVerbFacet', '_mapExtract', '_matchSame', '_quasiMatcher', + '_slotToBinding', '_splitList', '_suchThat', '_switchFailed', + '_validateFor', 'b__quasiParser', 'eval', 'import', 'm__quasiParser', + 'makeBrandPair', 'makeLazySlot', 'safeScope', 'simple__quasiParser', +] + + +class MonteLexer(RegexLexer): + """ + Lexer for the `Monte `_ programming language. + + .. versionadded:: 2.2 + """ + name = 'Monte' + aliases = ['monte'] + filenames = ['*.mt'] + + tokens = { + 'root': [ + # Comments + (r'#[^\n]*\n', Comment), + + # Docstrings + # Apologies for the non-greedy matcher here. 
+ (r'/\*\*.*?\*/', String.Doc), + + # `var` declarations + (r'\bvar\b', Keyword.Declaration, 'var'), + + # `interface` declarations + (r'\binterface\b', Keyword.Declaration, 'interface'), + + # method declarations + (words(_methods, prefix='\\b', suffix='\\b'), + Keyword, 'method'), + + # All other declarations + (words(_declarations, prefix='\\b', suffix='\\b'), + Keyword.Declaration), + + # Keywords + (words(_keywords, prefix='\\b', suffix='\\b'), Keyword), + + # Literals + ('[+-]?0x[_0-9a-fA-F]+', Number.Hex), + (r'[+-]?[_0-9]+\.[_0-9]*([eE][+-]?[_0-9]+)?', Number.Float), + ('[+-]?[_0-9]+', Number.Integer), + ("'", String.Double, 'char'), + ('"', String.Double, 'string'), + + # Quasiliterals + ('`', String.Backtick, 'ql'), + + # Operators + (words(_operators), Operator), + + # Verb operators + (_identifier + '=', Operator.Word), + + # Safe scope constants + (words(_constants, prefix='\\b', suffix='\\b'), + Keyword.Pseudo), + + # Safe scope guards + (words(_guards, prefix='\\b', suffix='\\b'), Keyword.Type), + + # All other safe scope names + (words(_safeScope, prefix='\\b', suffix='\\b'), + Name.Builtin), + + # Identifiers + (_identifier, Name), + + # Punctuation + (r'\(|\)|\{|\}|\[|\]|:|,', Punctuation), + + # Whitespace + (' +', Whitespace), + + # Definite lexer errors + ('=', Error), + ], + 'char': [ + # It is definitely an error to have a char of width == 0. + ("'", Error, 'root'), + (_escape_pattern, String.Escape, 'charEnd'), + ('.', String.Char, 'charEnd'), + ], + 'charEnd': [ + ("'", String.Char, '#pop:2'), + # It is definitely an error to have a char of width > 1. + ('.', Error), + ], + # The state of things coming into an interface. + 'interface': [ + (' +', Whitespace), + (_identifier, Name.Class, '#pop'), + include('root'), + ], + # The state of things coming into a method. + 'method': [ + (' +', Whitespace), + (_identifier, Name.Function, '#pop'), + include('root'), + ], + 'string': [ + ('"', String.Double, 'root'), + (_escape_pattern, String.Escape), + (r'\n', String.Double), + ('.', String.Double), + ], + 'ql': [ + ('`', String.Backtick, 'root'), + (r'\$' + _escape_pattern, String.Escape), + (r'\$\$', String.Escape), + (r'@@', String.Escape), + (r'\$\{', String.Interpol, 'qlNest'), + (r'@\{', String.Interpol, 'qlNest'), + (r'\$' + _identifier, Name), + ('@' + _identifier, Name), + ('.', String.Backtick), + ], + 'qlNest': [ + (r'\}', String.Interpol, '#pop'), + include('root'), + ], + # The state of things immediately following `var`. + 'var': [ + (' +', Whitespace), + (_identifier, Name.Variable, '#pop'), + include('root'), + ], + } diff --git a/pygments/lexers/mosel.py b/pygments/lexers/mosel.py old mode 100644 new mode 100755 index 1dbda1d..1f41682 --- a/pygments/lexers/mosel.py +++ b/pygments/lexers/mosel.py @@ -1,448 +1,448 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.mosel - ~~~~~~~~~~~~~~~~~~~~~ - - Lexers for the mosel language. - http://www.fico.com/en/products/fico-xpress-optimization - - :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. 
-""" - -from pygments.lexer import RegexLexer, words -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ - Number, Punctuation - -__all__ = ['MoselLexer'] - -FUNCTIONS = ( - # core functions - '_', - 'abs', - 'arctan', - 'asproc', - 'assert', - 'bitflip', - 'bitneg', - 'bitset', - 'bitshift', - 'bittest', - 'bitval', - 'ceil', - 'cos', - 'create', - 'currentdate', - 'currenttime', - 'cutelt', - 'cutfirst', - 'cuthead', - 'cutlast', - 'cuttail', - 'datablock', - 'delcell', - 'exists', - 'exit', - 'exp', - 'exportprob', - 'fclose', - 'fflush', - 'finalize', - 'findfirst', - 'findlast', - 'floor', - 'fopen', - 'fselect', - 'fskipline', - 'fwrite', - 'fwrite_', - 'fwriteln', - 'fwriteln_', - 'getact', - 'getcoeff', - 'getcoeffs', - 'getdual', - 'getelt', - 'getfid', - 'getfirst', - 'getfname', - 'gethead', - 'getlast', - 'getobjval', - 'getparam', - 'getrcost', - 'getreadcnt', - 'getreverse', - 'getsize', - 'getslack', - 'getsol', - 'gettail', - 'gettype', - 'getvars', - 'isdynamic', - 'iseof', - 'isfinite', - 'ishidden', - 'isinf', - 'isnan', - 'isodd', - 'ln', - 'localsetparam', - 'log', - 'makesos1', - 'makesos2', - 'maxlist', - 'memoryuse', - 'minlist', - 'newmuid', - 'publish', - 'random', - 'read', - 'readln', - 'reset', - 'restoreparam', - 'reverse', - 'round', - 'setcoeff', - 'sethidden', - 'setioerr', - 'setmatherr', - 'setname', - 'setparam', - 'setrandseed', - 'setrange', - 'settype', - 'sin', - 'splithead', - 'splittail', - 'sqrt', - 'strfmt', - 'substr', - 'timestamp', - 'unpublish', - 'versionnum', - 'versionstr', - 'write', - 'write_', - 'writeln', - 'writeln_', - - # mosel exam mmxprs | sed -n -e "s/ [pf][a-z]* \([a-zA-Z0-9_]*\).*/'\1',/p" | sort -u - 'addcut', - 'addcuts', - 'addmipsol', - 'basisstability', - 'calcsolinfo', - 'clearmipdir', - 'clearmodcut', - 'command', - 'copysoltoinit', - 'crossoverlpsol', - 'defdelayedrows', - 'defsecurevecs', - 'delcuts', - 'dropcuts', - 'estimatemarginals', - 'fixglobal', - 'flushmsgq', - 'getbstat', - 'getcnlist', - 'getcplist', - 'getdualray', - 'getiis', - 'getiissense', - 'getiistype', - 'getinfcause', - 'getinfeas', - 'getlb', - 'getlct', - 'getleft', - 'getloadedlinctrs', - 'getloadedmpvars', - 'getname', - 'getprimalray', - 'getprobstat', - 'getrange', - 'getright', - 'getsensrng', - 'getsize', - 'getsol', - 'gettype', - 'getub', - 'getvars', - 'gety', - 'hasfeature', - 'implies', - 'indicator', - 'initglobal', - 'ishidden', - 'isiisvalid', - 'isintegral', - 'loadbasis', - 'loadcuts', - 'loadlpsol', - 'loadmipsol', - 'loadprob', - 'maximise', - 'maximize', - 'minimise', - 'minimize', - 'postsolve', - 'readbasis', - 'readdirs', - 'readsol', - 'refinemipsol', - 'rejectintsol', - 'repairinfeas', - 'repairinfeas_deprec', - 'resetbasis', - 'resetiis', - 'resetsol', - 'savebasis', - 'savemipsol', - 'savesol', - 'savestate', - 'selectsol', - 'setarchconsistency', - 'setbstat', - 'setcallback', - 'setcbcutoff', - 'setgndata', - 'sethidden', - 'setlb', - 'setmipdir', - 'setmodcut', - 'setsol', - 'setub', - 'setucbdata', - 'stopoptimise', - 'stopoptimize', - 'storecut', - 'storecuts', - 'unloadprob', - 'uselastbarsol', - 'writebasis', - 'writedirs', - 'writeprob', - 'writesol', - 'xor', - 'xprs_addctr', - 'xprs_addindic', - - # mosel exam mmsystem | sed -n -e "s/ [pf][a-z]* \([a-zA-Z0-9_]*\).*/'\1',/p" | sort -u - 'addmonths', - 'copytext', - 'cuttext', - 'deltext', - 'endswith', - 'erase', - 'expandpath', - 'fcopy', - 'fdelete', - 'findfiles', - 'findtext', - 'fmove', - 'formattext', - 'getasnumber', - 'getchar', - 
'getcwd', - 'getdate', - 'getday', - 'getdaynum', - 'getdays', - 'getdirsep', - 'getdsoparam', - 'getendparse', - 'getenv', - 'getfsize', - 'getfstat', - 'getftime', - 'gethour', - 'getminute', - 'getmonth', - 'getmsec', - 'getoserrmsg', - 'getoserror', - 'getpathsep', - 'getqtype', - 'getsecond', - 'getsepchar', - 'getsize', - 'getstart', - 'getsucc', - 'getsysinfo', - 'getsysstat', - 'gettime', - 'gettmpdir', - 'gettrim', - 'getweekday', - 'getyear', - 'inserttext', - 'isvalid', - 'jointext', - 'makedir', - 'makepath', - 'newtar', - 'newzip', - 'nextfield', - 'openpipe', - 'parseextn', - 'parseint', - 'parsereal', - 'parsetext', - 'pastetext', - 'pathmatch', - 'pathsplit', - 'qsort', - 'quote', - 'readtextline', - 'regmatch', - 'regreplace', - 'removedir', - 'removefiles', - 'setchar', - 'setdate', - 'setday', - 'setdsoparam', - 'setendparse', - 'setenv', - 'sethour', - 'setminute', - 'setmonth', - 'setmsec', - 'setoserror', - 'setqtype', - 'setsecond', - 'setsepchar', - 'setstart', - 'setsucc', - 'settime', - 'settrim', - 'setyear', - 'sleep', - 'splittext', - 'startswith', - 'system', - 'tarlist', - 'textfmt', - 'tolower', - 'toupper', - 'trim', - 'untar', - 'unzip', - 'ziplist', - - # mosel exam mmjobs | sed -n -e "s/ [pf][a-z]* \([a-zA-Z0-9_]*\).*/'\1',/p" | sort -u - 'canceltimer', - 'clearaliases', - 'compile', - 'connect', - 'detach', - 'disconnect', - 'dropnextevent', - 'findxsrvs', - 'getaliases', - 'getannidents', - 'getannotations', - 'getbanner', - 'getclass', - 'getdsoprop', - 'getdsopropnum', - 'getexitcode', - 'getfromgid', - 'getfromid', - 'getfromuid', - 'getgid', - 'gethostalias', - 'getid', - 'getmodprop', - 'getmodpropnum', - 'getnextevent', - 'getnode', - 'getrmtid', - 'getstatus', - 'getsysinfo', - 'gettimer', - 'getuid', - 'getvalue', - 'isqueueempty', - 'load', - 'nullevent', - 'peeknextevent', - 'resetmodpar', - 'run', - 'send', - 'setcontrol', - 'setdefstream', - 'setgid', - 'sethostalias', - 'setmodpar', - 'settimer', - 'setuid', - 'setworkdir', - 'stop', - 'unload', - 'wait', - 'waitexpired', - 'waitfor', - 'waitforend', -) - - -class MoselLexer(RegexLexer): - """ - For the Mosel optimization language. - - .. 
versionadded:: 2.6 - """ - name = 'Mosel' - aliases = ['mosel'] - filenames = ['*.mos'] - - tokens = { - 'root': [ - (r'\n', Text), - (r'\s+', Text.Whitespace), - (r'!.*?\n', Comment.Single), - (r'\(!(.|\n)*?!\)', Comment.Multiline), - (words(( - 'and', 'as', 'break', 'case', 'count', 'declarations', 'do', - 'dynamic', 'elif', 'else', 'end-', 'end', 'evaluation', 'false', - 'forall', 'forward', 'from', 'function', 'hashmap', 'if', - 'imports', 'include', 'initialisations', 'initializations', 'inter', - 'max', 'min', 'model', 'namespace', 'next', 'not', 'nsgroup', - 'nssearch', 'of', 'options', 'or', 'package', 'parameters', - 'procedure', 'public', 'prod', 'record', 'repeat', 'requirements', - 'return', 'sum', 'then', 'to', 'true', 'union', 'until', 'uses', - 'version', 'while', 'with'), prefix=r'\b', suffix=r'\b'), - Keyword.Builtin), - (words(( - 'range', 'array', 'set', 'list', 'mpvar', 'mpproblem', 'linctr', - 'nlctr', 'integer', 'string', 'real', 'boolean', 'text', 'time', - 'date', 'datetime', 'returned', 'Model', 'Mosel', 'counter', - 'xmldoc', 'is_sos1', 'is_sos2', 'is_integer', 'is_binary', - 'is_continuous', 'is_free', 'is_semcont', 'is_semint', - 'is_partint'), prefix=r'\b', suffix=r'\b'), - Keyword.Type), - (r'(\+|\-|\*|/|=|<=|>=|\||\^|<|>|<>|\.\.|\.|:=|::|:|in|mod|div)', - Operator), - (r'[()\[\]{},;]+', Punctuation), - (words(FUNCTIONS, prefix=r'\b', suffix=r'\b'), Name.Function), - (r'(\d+\.(?!\.)\d*|\.(?!.)\d+)([eE][+-]?\d+)?', Number.Float), - (r'\d+([eE][+-]?\d+)?', Number.Integer), - (r'[+-]?Infinity', Number.Integer), - (r'0[xX][0-9a-fA-F]+', Number), - (r'"', String.Double, 'double_quote'), - (r'\'', String.Single, 'single_quote'), - (r'(\w+|(\.(?!\.)))', Text), - ], - 'single_quote': [ - (r'\'', String.Single, '#pop'), - (r'[^\']+', String.Single), - ], - 'double_quote': [ - (r'(\\"|\\[0-7]{1,3}\D|\\[abfnrtv]|\\\\)', String.Escape), - (r'\"', String.Double, '#pop'), - (r'[^"\\]+', String.Double), - ], - } +# -*- coding: utf-8 -*- +""" + pygments.lexers.mosel + ~~~~~~~~~~~~~~~~~~~~~ + + Lexers for the mosel language. + http://www.fico.com/en/products/fico-xpress-optimization + + :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +from pygments.lexer import RegexLexer, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +__all__ = ['MoselLexer'] + +FUNCTIONS = ( + # core functions + '_', + 'abs', + 'arctan', + 'asproc', + 'assert', + 'bitflip', + 'bitneg', + 'bitset', + 'bitshift', + 'bittest', + 'bitval', + 'ceil', + 'cos', + 'create', + 'currentdate', + 'currenttime', + 'cutelt', + 'cutfirst', + 'cuthead', + 'cutlast', + 'cuttail', + 'datablock', + 'delcell', + 'exists', + 'exit', + 'exp', + 'exportprob', + 'fclose', + 'fflush', + 'finalize', + 'findfirst', + 'findlast', + 'floor', + 'fopen', + 'fselect', + 'fskipline', + 'fwrite', + 'fwrite_', + 'fwriteln', + 'fwriteln_', + 'getact', + 'getcoeff', + 'getcoeffs', + 'getdual', + 'getelt', + 'getfid', + 'getfirst', + 'getfname', + 'gethead', + 'getlast', + 'getobjval', + 'getparam', + 'getrcost', + 'getreadcnt', + 'getreverse', + 'getsize', + 'getslack', + 'getsol', + 'gettail', + 'gettype', + 'getvars', + 'isdynamic', + 'iseof', + 'isfinite', + 'ishidden', + 'isinf', + 'isnan', + 'isodd', + 'ln', + 'localsetparam', + 'log', + 'makesos1', + 'makesos2', + 'maxlist', + 'memoryuse', + 'minlist', + 'newmuid', + 'publish', + 'random', + 'read', + 'readln', + 'reset', + 'restoreparam', + 'reverse', + 'round', + 'setcoeff', + 'sethidden', + 'setioerr', + 'setmatherr', + 'setname', + 'setparam', + 'setrandseed', + 'setrange', + 'settype', + 'sin', + 'splithead', + 'splittail', + 'sqrt', + 'strfmt', + 'substr', + 'timestamp', + 'unpublish', + 'versionnum', + 'versionstr', + 'write', + 'write_', + 'writeln', + 'writeln_', + + # mosel exam mmxprs | sed -n -e "s/ [pf][a-z]* \([a-zA-Z0-9_]*\).*/'\1',/p" | sort -u + 'addcut', + 'addcuts', + 'addmipsol', + 'basisstability', + 'calcsolinfo', + 'clearmipdir', + 'clearmodcut', + 'command', + 'copysoltoinit', + 'crossoverlpsol', + 'defdelayedrows', + 'defsecurevecs', + 'delcuts', + 'dropcuts', + 'estimatemarginals', + 'fixglobal', + 'flushmsgq', + 'getbstat', + 'getcnlist', + 'getcplist', + 'getdualray', + 'getiis', + 'getiissense', + 'getiistype', + 'getinfcause', + 'getinfeas', + 'getlb', + 'getlct', + 'getleft', + 'getloadedlinctrs', + 'getloadedmpvars', + 'getname', + 'getprimalray', + 'getprobstat', + 'getrange', + 'getright', + 'getsensrng', + 'getsize', + 'getsol', + 'gettype', + 'getub', + 'getvars', + 'gety', + 'hasfeature', + 'implies', + 'indicator', + 'initglobal', + 'ishidden', + 'isiisvalid', + 'isintegral', + 'loadbasis', + 'loadcuts', + 'loadlpsol', + 'loadmipsol', + 'loadprob', + 'maximise', + 'maximize', + 'minimise', + 'minimize', + 'postsolve', + 'readbasis', + 'readdirs', + 'readsol', + 'refinemipsol', + 'rejectintsol', + 'repairinfeas', + 'repairinfeas_deprec', + 'resetbasis', + 'resetiis', + 'resetsol', + 'savebasis', + 'savemipsol', + 'savesol', + 'savestate', + 'selectsol', + 'setarchconsistency', + 'setbstat', + 'setcallback', + 'setcbcutoff', + 'setgndata', + 'sethidden', + 'setlb', + 'setmipdir', + 'setmodcut', + 'setsol', + 'setub', + 'setucbdata', + 'stopoptimise', + 'stopoptimize', + 'storecut', + 'storecuts', + 'unloadprob', + 'uselastbarsol', + 'writebasis', + 'writedirs', + 'writeprob', + 'writesol', + 'xor', + 'xprs_addctr', + 'xprs_addindic', + + # mosel exam mmsystem | sed -n -e "s/ [pf][a-z]* \([a-zA-Z0-9_]*\).*/'\1',/p" | sort -u + 'addmonths', + 'copytext', + 'cuttext', + 'deltext', + 'endswith', + 'erase', + 'expandpath', + 'fcopy', + 'fdelete', + 'findfiles', + 'findtext', + 'fmove', + 'formattext', + 'getasnumber', + 'getchar', + 
'getcwd', + 'getdate', + 'getday', + 'getdaynum', + 'getdays', + 'getdirsep', + 'getdsoparam', + 'getendparse', + 'getenv', + 'getfsize', + 'getfstat', + 'getftime', + 'gethour', + 'getminute', + 'getmonth', + 'getmsec', + 'getoserrmsg', + 'getoserror', + 'getpathsep', + 'getqtype', + 'getsecond', + 'getsepchar', + 'getsize', + 'getstart', + 'getsucc', + 'getsysinfo', + 'getsysstat', + 'gettime', + 'gettmpdir', + 'gettrim', + 'getweekday', + 'getyear', + 'inserttext', + 'isvalid', + 'jointext', + 'makedir', + 'makepath', + 'newtar', + 'newzip', + 'nextfield', + 'openpipe', + 'parseextn', + 'parseint', + 'parsereal', + 'parsetext', + 'pastetext', + 'pathmatch', + 'pathsplit', + 'qsort', + 'quote', + 'readtextline', + 'regmatch', + 'regreplace', + 'removedir', + 'removefiles', + 'setchar', + 'setdate', + 'setday', + 'setdsoparam', + 'setendparse', + 'setenv', + 'sethour', + 'setminute', + 'setmonth', + 'setmsec', + 'setoserror', + 'setqtype', + 'setsecond', + 'setsepchar', + 'setstart', + 'setsucc', + 'settime', + 'settrim', + 'setyear', + 'sleep', + 'splittext', + 'startswith', + 'system', + 'tarlist', + 'textfmt', + 'tolower', + 'toupper', + 'trim', + 'untar', + 'unzip', + 'ziplist', + + # mosel exam mmjobs | sed -n -e "s/ [pf][a-z]* \([a-zA-Z0-9_]*\).*/'\1',/p" | sort -u + 'canceltimer', + 'clearaliases', + 'compile', + 'connect', + 'detach', + 'disconnect', + 'dropnextevent', + 'findxsrvs', + 'getaliases', + 'getannidents', + 'getannotations', + 'getbanner', + 'getclass', + 'getdsoprop', + 'getdsopropnum', + 'getexitcode', + 'getfromgid', + 'getfromid', + 'getfromuid', + 'getgid', + 'gethostalias', + 'getid', + 'getmodprop', + 'getmodpropnum', + 'getnextevent', + 'getnode', + 'getrmtid', + 'getstatus', + 'getsysinfo', + 'gettimer', + 'getuid', + 'getvalue', + 'isqueueempty', + 'load', + 'nullevent', + 'peeknextevent', + 'resetmodpar', + 'run', + 'send', + 'setcontrol', + 'setdefstream', + 'setgid', + 'sethostalias', + 'setmodpar', + 'settimer', + 'setuid', + 'setworkdir', + 'stop', + 'unload', + 'wait', + 'waitexpired', + 'waitfor', + 'waitforend', +) + + +class MoselLexer(RegexLexer): + """ + For the Mosel optimization language. + + .. 
versionadded:: 2.6
+    """
+    name = 'Mosel'
+    aliases = ['mosel']
+    filenames = ['*.mos']
+
+    tokens = {
+        'root': [
+            (r'\n', Text),
+            (r'\s+', Text.Whitespace),
+            (r'!.*?\n', Comment.Single),
+            (r'\(!(.|\n)*?!\)', Comment.Multiline),
+            (words((
+                'and', 'as', 'break', 'case', 'count', 'declarations', 'do',
+                'dynamic', 'elif', 'else', 'end-', 'end', 'evaluation', 'false',
+                'forall', 'forward', 'from', 'function', 'hashmap', 'if',
+                'imports', 'include', 'initialisations', 'initializations', 'inter',
+                'max', 'min', 'model', 'namespace', 'next', 'not', 'nsgroup',
+                'nssearch', 'of', 'options', 'or', 'package', 'parameters',
+                'procedure', 'public', 'prod', 'record', 'repeat', 'requirements',
+                'return', 'sum', 'then', 'to', 'true', 'union', 'until', 'uses',
+                'version', 'while', 'with'), prefix=r'\b', suffix=r'\b'),
+             Keyword.Builtin),
+            (words((
+                'range', 'array', 'set', 'list', 'mpvar', 'mpproblem', 'linctr',
+                'nlctr', 'integer', 'string', 'real', 'boolean', 'text', 'time',
+                'date', 'datetime', 'returned', 'Model', 'Mosel', 'counter',
+                'xmldoc', 'is_sos1', 'is_sos2', 'is_integer', 'is_binary',
+                'is_continuous', 'is_free', 'is_semcont', 'is_semint',
+                'is_partint'), prefix=r'\b', suffix=r'\b'),
+             Keyword.Type),
+            (r'(\+|\-|\*|/|=|<=|>=|\||\^|<|>|<>|\.\.|\.|:=|::|:|in|mod|div)',
+             Operator),
+            (r'[()\[\]{},;]+', Punctuation),
+            (words(FUNCTIONS, prefix=r'\b', suffix=r'\b'), Name.Function),
+            (r'(\d+\.(?!\.)\d*|\.(?!.)\d+)([eE][+-]?\d+)?', Number.Float),
+            (r'\d+([eE][+-]?\d+)?', Number.Integer),
+            (r'[+-]?Infinity', Number.Integer),
+            (r'0[xX][0-9a-fA-F]+', Number),
+            (r'"', String.Double, 'double_quote'),
+            (r'\'', String.Single, 'single_quote'),
+            (r'(\w+|(\.(?!\.)))', Text),
+        ],
+        'single_quote': [
+            (r'\'', String.Single, '#pop'),
+            (r'[^\']+', String.Single),
+        ],
+        'double_quote': [
+            (r'(\\"|\\[0-7]{1,3}\D|\\[abfnrtv]|\\\\)', String.Escape),
+            (r'\"', String.Double, '#pop'),
+            (r'[^"\\]+', String.Double),
+        ],
+    }
diff --git a/pygments/lexers/ncl.py b/pygments/lexers/ncl.py
old mode 100644
new mode 100755
index e2edd6c..a7f8e2f
--- a/pygments/lexers/ncl.py
+++ b/pygments/lexers/ncl.py
@@ -1,894 +1,894 @@
-# -*- coding: utf-8 -*-
-"""
-    pygments.lexers.ncl
-    ~~~~~~~~~~~~~~~~~~~
-
-    Lexers for NCAR Command Language.
-
-    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
-    :license: BSD, see LICENSE for details.
-"""
-
-import re
-
-from pygments.lexer import RegexLexer, include, words
-from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
-    Number, Punctuation
-
-__all__ = ['NCLLexer']
-
-
-class NCLLexer(RegexLexer):
-    """
-    Lexer for NCL code.
-
-    ..
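
The mosel.py hunk is complete at this point. A minimal smoke test of the new MoselLexer looks like the following — a sketch only: the SAMPLE model text and variable names are illustrative, while highlight(), TerminalFormatter and the pygments.lexers.mosel module are the actual public Pygments API:

    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    from pygments.lexers.mosel import MoselLexer

    # Illustrative Mosel model exercising keywords ('model', 'declarations'),
    # a builtin from FUNCTIONS ('writeln'), a string, and a '!' line comment.
    SAMPLE = '''model "hello"
      declarations
        x: integer
      end-declarations
      x := 42  ! assignment uses the ':=' operator matched above
      writeln("x = ", x)
    end-model
    '''

    # highlight() runs the lexer over SAMPLE and feeds the token stream
    # to the formatter, here producing ANSI-colored terminal output.
    print(highlight(SAMPLE, MoselLexer(), TerminalFormatter()))

Because the class sets aliases = ['mosel'], the same lexer is also reachable via pygments.lexers.get_lexer_by_name('mosel').
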
versionadded:: 2.2 - """ - name = 'NCL' - aliases = ['ncl'] - filenames = ['*.ncl'] - mimetypes = ['text/ncl'] - flags = re.MULTILINE - - tokens = { - 'root': [ - (r';.*\n', Comment), - include('strings'), - include('core'), - (r'[a-zA-Z_]\w*', Name), - include('nums'), - (r'[\s]+', Text), - ], - 'core': [ - # Statements - (words(( - 'begin', 'break', 'continue', 'create', 'defaultapp', 'do', - 'else', 'end', 'external', 'exit', 'True', 'False', 'file', 'function', - 'getvalues', 'graphic', 'group', 'if', 'list', 'load', 'local', - 'new', '_Missing', 'Missing', 'noparent', 'procedure', - 'quit', 'QUIT', 'Quit', 'record', 'return', 'setvalues', 'stop', - 'then', 'while'), prefix=r'\b', suffix=r'\s*\b'), - Keyword), - - # Data Types - (words(( - 'ubyte', 'uint', 'uint64', 'ulong', 'string', 'byte', - 'character', 'double', 'float', 'integer', 'int64', 'logical', - 'long', 'short', 'ushort', 'enumeric', 'numeric', 'snumeric'), - prefix=r'\b', suffix=r'\s*\b'), - Keyword.Type), - - # Operators - (r'[\%^*+\-/<>]', Operator), - - # punctuation: - (r'[\[\]():@$!&|.,\\{}]', Punctuation), - (r'[=:]', Punctuation), - - # Intrinsics - (words(( - 'abs', 'acos', 'addfile', 'addfiles', 'all', 'angmom_atm', 'any', - 'area_conserve_remap', 'area_hi2lores', 'area_poly_sphere', - 'asciiread', 'asciiwrite', 'asin', 'atan', 'atan2', 'attsetvalues', - 'avg', 'betainc', 'bin_avg', 'bin_sum', 'bw_bandpass_filter', - 'cancor', 'cbinread', 'cbinwrite', 'cd_calendar', 'cd_inv_calendar', - 'cdfbin_p', 'cdfbin_pr', 'cdfbin_s', 'cdfbin_xn', 'cdfchi_p', - 'cdfchi_x', 'cdfgam_p', 'cdfgam_x', 'cdfnor_p', 'cdfnor_x', - 'cdft_p', 'cdft_t', 'ceil', 'center_finite_diff', - 'center_finite_diff_n', 'cfftb', 'cfftf', 'cfftf_frq_reorder', - 'charactertodouble', 'charactertofloat', 'charactertointeger', - 'charactertolong', 'charactertoshort', 'charactertostring', - 'chartodouble', 'chartofloat', 'chartoint', 'chartointeger', - 'chartolong', 'chartoshort', 'chartostring', 'chiinv', 'clear', - 'color_index_to_rgba', 'conform', 'conform_dims', 'cos', 'cosh', - 'count_unique_values', 'covcorm', 'covcorm_xy', 'craybinnumrec', - 'craybinrecread', 'create_graphic', 'csa1', 'csa1d', 'csa1s', - 'csa1x', 'csa1xd', 'csa1xs', 'csa2', 'csa2d', 'csa2l', 'csa2ld', - 'csa2ls', 'csa2lx', 'csa2lxd', 'csa2lxs', 'csa2s', 'csa2x', - 'csa2xd', 'csa2xs', 'csa3', 'csa3d', 'csa3l', 'csa3ld', 'csa3ls', - 'csa3lx', 'csa3lxd', 'csa3lxs', 'csa3s', 'csa3x', 'csa3xd', - 'csa3xs', 'csc2s', 'csgetp', 'css2c', 'cssetp', 'cssgrid', 'csstri', - 'csvoro', 'cumsum', 'cz2ccm', 'datatondc', 'day_of_week', - 'day_of_year', 'days_in_month', 'default_fillvalue', 'delete', - 'depth_to_pres', 'destroy', 'determinant', 'dewtemp_trh', - 'dgeevx_lapack', 'dim_acumrun_n', 'dim_avg', 'dim_avg_n', - 'dim_avg_wgt', 'dim_avg_wgt_n', 'dim_cumsum', 'dim_cumsum_n', - 'dim_gamfit_n', 'dim_gbits', 'dim_max', 'dim_max_n', 'dim_median', - 'dim_median_n', 'dim_min', 'dim_min_n', 'dim_num', 'dim_num_n', - 'dim_numrun_n', 'dim_pqsort', 'dim_pqsort_n', 'dim_product', - 'dim_product_n', 'dim_rmsd', 'dim_rmsd_n', 'dim_rmvmean', - 'dim_rmvmean_n', 'dim_rmvmed', 'dim_rmvmed_n', 'dim_spi_n', - 'dim_standardize', 'dim_standardize_n', 'dim_stat4', 'dim_stat4_n', - 'dim_stddev', 'dim_stddev_n', 'dim_sum', 'dim_sum_n', 'dim_sum_wgt', - 'dim_sum_wgt_n', 'dim_variance', 'dim_variance_n', 'dimsizes', - 'doubletobyte', 'doubletochar', 'doubletocharacter', - 'doubletofloat', 'doubletoint', 'doubletointeger', 'doubletolong', - 'doubletoshort', 'dpres_hybrid_ccm', 'dpres_plevel', 'draw', - 
'draw_color_palette', 'dsgetp', 'dsgrid2', 'dsgrid2d', 'dsgrid2s', - 'dsgrid3', 'dsgrid3d', 'dsgrid3s', 'dspnt2', 'dspnt2d', 'dspnt2s', - 'dspnt3', 'dspnt3d', 'dspnt3s', 'dssetp', 'dtrend', 'dtrend_msg', - 'dtrend_msg_n', 'dtrend_n', 'dtrend_quadratic', - 'dtrend_quadratic_msg_n', 'dv2uvf', 'dv2uvg', 'dz_height', - 'echo_off', 'echo_on', 'eof2data', 'eof_varimax', 'eofcor', - 'eofcor_pcmsg', 'eofcor_ts', 'eofcov', 'eofcov_pcmsg', 'eofcov_ts', - 'eofunc', 'eofunc_ts', 'eofunc_varimax', 'equiv_sample_size', 'erf', - 'erfc', 'esacr', 'esacv', 'esccr', 'esccv', 'escorc', 'escorc_n', - 'escovc', 'exit', 'exp', 'exp_tapersh', 'exp_tapersh_wgts', - 'exp_tapershC', 'ezfftb', 'ezfftb_n', 'ezfftf', 'ezfftf_n', - 'f2fosh', 'f2foshv', 'f2fsh', 'f2fshv', 'f2gsh', 'f2gshv', 'fabs', - 'fbindirread', 'fbindirwrite', 'fbinnumrec', 'fbinread', - 'fbinrecread', 'fbinrecwrite', 'fbinwrite', 'fft2db', 'fft2df', - 'fftshift', 'fileattdef', 'filechunkdimdef', 'filedimdef', - 'fileexists', 'filegrpdef', 'filevarattdef', 'filevarchunkdef', - 'filevarcompressleveldef', 'filevardef', 'filevardimsizes', - 'filwgts_lancos', 'filwgts_lanczos', 'filwgts_normal', - 'floattobyte', 'floattochar', 'floattocharacter', 'floattoint', - 'floattointeger', 'floattolong', 'floattoshort', 'floor', - 'fluxEddy', 'fo2fsh', 'fo2fshv', 'fourier_info', 'frame', 'fspan', - 'ftcurv', 'ftcurvd', 'ftcurvi', 'ftcurvp', 'ftcurvpi', 'ftcurvps', - 'ftcurvs', 'ftest', 'ftgetp', 'ftkurv', 'ftkurvd', 'ftkurvp', - 'ftkurvpd', 'ftsetp', 'ftsurf', 'g2fsh', 'g2fshv', 'g2gsh', - 'g2gshv', 'gamma', 'gammainc', 'gaus', 'gaus_lobat', - 'gaus_lobat_wgt', 'gc_aangle', 'gc_clkwise', 'gc_dangle', - 'gc_inout', 'gc_latlon', 'gc_onarc', 'gc_pnt2gc', 'gc_qarea', - 'gc_tarea', 'generate_2d_array', 'get_color_index', - 'get_color_rgba', 'get_cpu_time', 'get_isolines', 'get_ncl_version', - 'get_script_name', 'get_script_prefix_name', 'get_sphere_radius', - 'get_unique_values', 'getbitsone', 'getenv', 'getfiledimsizes', - 'getfilegrpnames', 'getfilepath', 'getfilevaratts', - 'getfilevarchunkdimsizes', 'getfilevardims', 'getfilevardimsizes', - 'getfilevarnames', 'getfilevartypes', 'getvaratts', 'getvardims', - 'gradsf', 'gradsg', 'greg2jul', 'grid2triple', 'hlsrgb', 'hsvrgb', - 'hydro', 'hyi2hyo', 'idsfft', 'igradsf', 'igradsg', 'ilapsf', - 'ilapsg', 'ilapvf', 'ilapvg', 'ind', 'ind_resolve', 'int2p', - 'int2p_n', 'integertobyte', 'integertochar', 'integertocharacter', - 'integertoshort', 'inttobyte', 'inttochar', 'inttoshort', - 'inverse_matrix', 'isatt', 'isbigendian', 'isbyte', 'ischar', - 'iscoord', 'isdefined', 'isdim', 'isdimnamed', 'isdouble', - 'isenumeric', 'isfile', 'isfilepresent', 'isfilevar', - 'isfilevaratt', 'isfilevarcoord', 'isfilevardim', 'isfloat', - 'isfunc', 'isgraphic', 'isint', 'isint64', 'isinteger', - 'isleapyear', 'islogical', 'islong', 'ismissing', 'isnan_ieee', - 'isnumeric', 'ispan', 'isproc', 'isshort', 'issnumeric', 'isstring', - 'isubyte', 'isuint', 'isuint64', 'isulong', 'isunlimited', - 'isunsigned', 'isushort', 'isvar', 'jul2greg', 'kmeans_as136', - 'kolsm2_n', 'kron_product', 'lapsf', 'lapsg', 'lapvf', 'lapvg', - 'latlon2utm', 'lclvl', 'lderuvf', 'lderuvg', 'linint1', 'linint1_n', - 'linint2', 'linint2_points', 'linmsg', 'linmsg_n', 'linrood_latwgt', - 'linrood_wgt', 'list_files', 'list_filevars', 'list_hlus', - 'list_procfuncs', 'list_vars', 'ListAppend', 'ListCount', - 'ListGetType', 'ListIndex', 'ListIndexFromName', 'ListPop', - 'ListPush', 'ListSetType', 'loadscript', 'local_max', 'local_min', - 'log', 'log10', 'longtobyte', 
'longtochar', 'longtocharacter', - 'longtoint', 'longtointeger', 'longtoshort', 'lspoly', 'lspoly_n', - 'mask', 'max', 'maxind', 'min', 'minind', 'mixed_layer_depth', - 'mixhum_ptd', 'mixhum_ptrh', 'mjo_cross_coh2pha', - 'mjo_cross_segment', 'moc_globe_atl', 'monthday', 'natgrid', - 'natgridd', 'natgrids', 'ncargpath', 'ncargversion', 'ndctodata', - 'ndtooned', 'new', 'NewList', 'ngezlogo', 'nggcog', 'nggetp', - 'nglogo', 'ngsetp', 'NhlAddAnnotation', 'NhlAddData', - 'NhlAddOverlay', 'NhlAddPrimitive', 'NhlAppGetDefaultParentId', - 'NhlChangeWorkstation', 'NhlClassName', 'NhlClearWorkstation', - 'NhlDataPolygon', 'NhlDataPolyline', 'NhlDataPolymarker', - 'NhlDataToNDC', 'NhlDestroy', 'NhlDraw', 'NhlFrame', 'NhlFreeColor', - 'NhlGetBB', 'NhlGetClassResources', 'NhlGetErrorObjectId', - 'NhlGetNamedColorIndex', 'NhlGetParentId', - 'NhlGetParentWorkstation', 'NhlGetWorkspaceObjectId', - 'NhlIsAllocatedColor', 'NhlIsApp', 'NhlIsDataComm', 'NhlIsDataItem', - 'NhlIsDataSpec', 'NhlIsTransform', 'NhlIsView', 'NhlIsWorkstation', - 'NhlName', 'NhlNDCPolygon', 'NhlNDCPolyline', 'NhlNDCPolymarker', - 'NhlNDCToData', 'NhlNewColor', 'NhlNewDashPattern', 'NhlNewMarker', - 'NhlPalGetDefined', 'NhlRemoveAnnotation', 'NhlRemoveData', - 'NhlRemoveOverlay', 'NhlRemovePrimitive', 'NhlSetColor', - 'NhlSetDashPattern', 'NhlSetMarker', 'NhlUpdateData', - 'NhlUpdateWorkstation', 'nice_mnmxintvl', 'nngetaspectd', - 'nngetaspects', 'nngetp', 'nngetsloped', 'nngetslopes', 'nngetwts', - 'nngetwtsd', 'nnpnt', 'nnpntd', 'nnpntend', 'nnpntendd', - 'nnpntinit', 'nnpntinitd', 'nnpntinits', 'nnpnts', 'nnsetp', 'num', - 'obj_anal_ic', 'omega_ccm', 'onedtond', 'overlay', 'paleo_outline', - 'pdfxy_bin', 'poisson_grid_fill', 'pop_remap', 'potmp_insitu_ocn', - 'prcwater_dp', 'pres2hybrid', 'pres_hybrid_ccm', 'pres_sigma', - 'print', 'print_table', 'printFileVarSummary', 'printVarSummary', - 'product', 'pslec', 'pslhor', 'pslhyp', 'qsort', 'rand', - 'random_chi', 'random_gamma', 'random_normal', 'random_setallseed', - 'random_uniform', 'rcm2points', 'rcm2rgrid', 'rdsstoi', - 'read_colormap_file', 'reg_multlin', 'regcoef', 'regCoef_n', - 'regline', 'relhum', 'replace_ieeenan', 'reshape', 'reshape_ind', - 'rgba_to_color_index', 'rgbhls', 'rgbhsv', 'rgbyiq', 'rgrid2rcm', - 'rhomb_trunc', 'rip_cape_2d', 'rip_cape_3d', 'round', 'rtest', - 'runave', 'runave_n', 'set_default_fillvalue', 'set_sphere_radius', - 'setfileoption', 'sfvp2uvf', 'sfvp2uvg', 'shaec', 'shagc', - 'shgetnp', 'shgetp', 'shgrid', 'shorttobyte', 'shorttochar', - 'shorttocharacter', 'show_ascii', 'shsec', 'shsetp', 'shsgc', - 'shsgc_R42', 'sigma2hybrid', 'simpeq', 'simpne', 'sin', - 'sindex_yrmo', 'sinh', 'sizeof', 'sleep', 'smth9', 'snindex_yrmo', - 'solve_linsys', 'span_color_indexes', 'span_color_rgba', - 'sparse_matrix_mult', 'spcorr', 'spcorr_n', 'specx_anal', - 'specxy_anal', 'spei', 'sprintf', 'sprinti', 'sqrt', 'sqsort', - 'srand', 'stat2', 'stat4', 'stat_medrng', 'stat_trim', - 'status_exit', 'stdatmus_p2tdz', 'stdatmus_z2tdp', 'stddev', - 'str_capital', 'str_concat', 'str_fields_count', 'str_get_cols', - 'str_get_dq', 'str_get_field', 'str_get_nl', 'str_get_sq', - 'str_get_tab', 'str_index_of_substr', 'str_insert', 'str_is_blank', - 'str_join', 'str_left_strip', 'str_lower', 'str_match', - 'str_match_ic', 'str_match_ic_regex', 'str_match_ind', - 'str_match_ind_ic', 'str_match_ind_ic_regex', 'str_match_ind_regex', - 'str_match_regex', 'str_right_strip', 'str_split', - 'str_split_by_length', 'str_split_csv', 'str_squeeze', 'str_strip', - 'str_sub_str', 
'str_switch', 'str_upper', 'stringtochar', - 'stringtocharacter', 'stringtodouble', 'stringtofloat', - 'stringtoint', 'stringtointeger', 'stringtolong', 'stringtoshort', - 'strlen', 'student_t', 'sum', 'svd_lapack', 'svdcov', 'svdcov_sv', - 'svdstd', 'svdstd_sv', 'system', 'systemfunc', 'tan', 'tanh', - 'taper', 'taper_n', 'tdclrs', 'tdctri', 'tdcudp', 'tdcurv', - 'tddtri', 'tdez2d', 'tdez3d', 'tdgetp', 'tdgrds', 'tdgrid', - 'tdgtrs', 'tdinit', 'tditri', 'tdlbla', 'tdlblp', 'tdlbls', - 'tdline', 'tdlndp', 'tdlnpa', 'tdlpdp', 'tdmtri', 'tdotri', - 'tdpara', 'tdplch', 'tdprpa', 'tdprpi', 'tdprpt', 'tdsetp', - 'tdsort', 'tdstri', 'tdstrs', 'tdttri', 'thornthwaite', 'tobyte', - 'tochar', 'todouble', 'tofloat', 'toint', 'toint64', 'tointeger', - 'tolong', 'toshort', 'tosigned', 'tostring', 'tostring_with_format', - 'totype', 'toubyte', 'touint', 'touint64', 'toulong', 'tounsigned', - 'toushort', 'trend_manken', 'tri_trunc', 'triple2grid', - 'triple2grid2d', 'trop_wmo', 'ttest', 'typeof', 'undef', - 'unique_string', 'update', 'ushorttoint', 'ut_calendar', - 'ut_inv_calendar', 'utm2latlon', 'uv2dv_cfd', 'uv2dvf', 'uv2dvg', - 'uv2sfvpf', 'uv2sfvpg', 'uv2vr_cfd', 'uv2vrdvf', 'uv2vrdvg', - 'uv2vrf', 'uv2vrg', 'v5d_close', 'v5d_create', 'v5d_setLowLev', - 'v5d_setUnits', 'v5d_write', 'v5d_write_var', 'variance', 'vhaec', - 'vhagc', 'vhsec', 'vhsgc', 'vibeta', 'vinth2p', 'vinth2p_ecmwf', - 'vinth2p_ecmwf_nodes', 'vinth2p_nodes', 'vintp2p_ecmwf', 'vr2uvf', - 'vr2uvg', 'vrdv2uvf', 'vrdv2uvg', 'wavelet', 'wavelet_default', - 'weibull', 'wgt_area_smooth', 'wgt_areaave', 'wgt_areaave2', - 'wgt_arearmse', 'wgt_arearmse2', 'wgt_areasum2', 'wgt_runave', - 'wgt_runave_n', 'wgt_vert_avg_beta', 'wgt_volave', 'wgt_volave_ccm', - 'wgt_volrmse', 'wgt_volrmse_ccm', 'where', 'wk_smooth121', 'wmbarb', - 'wmbarbmap', 'wmdrft', 'wmgetp', 'wmlabs', 'wmsetp', 'wmstnm', - 'wmvect', 'wmvectmap', 'wmvlbl', 'wrf_avo', 'wrf_cape_2d', - 'wrf_cape_3d', 'wrf_dbz', 'wrf_eth', 'wrf_helicity', 'wrf_ij_to_ll', - 'wrf_interp_1d', 'wrf_interp_2d_xy', 'wrf_interp_3d_z', - 'wrf_latlon_to_ij', 'wrf_ll_to_ij', 'wrf_omega', 'wrf_pvo', - 'wrf_rh', 'wrf_slp', 'wrf_smooth_2d', 'wrf_td', 'wrf_tk', - 'wrf_updraft_helicity', 'wrf_uvmet', 'wrf_virtual_temp', - 'wrf_wetbulb', 'wrf_wps_close_int', 'wrf_wps_open_int', - 'wrf_wps_rddata_int', 'wrf_wps_rdhead_int', 'wrf_wps_read_int', - 'wrf_wps_write_int', 'write_matrix', 'write_table', 'yiqrgb', - 'z2geouv', 'zonal_mpsi', 'addfiles_GetVar', 'advect_variable', - 'area_conserve_remap_Wrap', 'area_hi2lores_Wrap', - 'array_append_record', 'assignFillValue', 'byte2flt', - 'byte2flt_hdf', 'calcDayAnomTLL', 'calcMonAnomLLLT', - 'calcMonAnomLLT', 'calcMonAnomTLL', 'calcMonAnomTLLL', - 'calculate_monthly_values', 'cd_convert', 'changeCase', - 'changeCaseChar', 'clmDayTLL', 'clmDayTLLL', 'clmMon2clmDay', - 'clmMonLLLT', 'clmMonLLT', 'clmMonTLL', 'clmMonTLLL', 'closest_val', - 'copy_VarAtts', 'copy_VarCoords', 'copy_VarCoords_1', - 'copy_VarCoords_2', 'copy_VarMeta', 'copyatt', 'crossp3', - 'cshstringtolist', 'cssgrid_Wrap', 'dble2flt', 'decimalPlaces', - 'delete_VarAtts', 'dim_avg_n_Wrap', 'dim_avg_wgt_n_Wrap', - 'dim_avg_wgt_Wrap', 'dim_avg_Wrap', 'dim_cumsum_n_Wrap', - 'dim_cumsum_Wrap', 'dim_max_n_Wrap', 'dim_min_n_Wrap', - 'dim_rmsd_n_Wrap', 'dim_rmsd_Wrap', 'dim_rmvmean_n_Wrap', - 'dim_rmvmean_Wrap', 'dim_rmvmed_n_Wrap', 'dim_rmvmed_Wrap', - 'dim_standardize_n_Wrap', 'dim_standardize_Wrap', - 'dim_stddev_n_Wrap', 'dim_stddev_Wrap', 'dim_sum_n_Wrap', - 'dim_sum_wgt_n_Wrap', 'dim_sum_wgt_Wrap', 
'dim_sum_Wrap', - 'dim_variance_n_Wrap', 'dim_variance_Wrap', 'dpres_plevel_Wrap', - 'dtrend_leftdim', 'dv2uvF_Wrap', 'dv2uvG_Wrap', 'eof_north', - 'eofcor_Wrap', 'eofcov_Wrap', 'eofunc_north', 'eofunc_ts_Wrap', - 'eofunc_varimax_reorder', 'eofunc_varimax_Wrap', 'eofunc_Wrap', - 'epsZero', 'f2fosh_Wrap', 'f2foshv_Wrap', 'f2fsh_Wrap', - 'f2fshv_Wrap', 'f2gsh_Wrap', 'f2gshv_Wrap', 'fbindirSwap', - 'fbinseqSwap1', 'fbinseqSwap2', 'flt2dble', 'flt2string', - 'fo2fsh_Wrap', 'fo2fshv_Wrap', 'g2fsh_Wrap', 'g2fshv_Wrap', - 'g2gsh_Wrap', 'g2gshv_Wrap', 'generate_resample_indices', - 'generate_sample_indices', 'generate_unique_indices', - 'genNormalDist', 'get1Dindex', 'get1Dindex_Collapse', - 'get1Dindex_Exclude', 'get_file_suffix', 'GetFillColor', - 'GetFillColorIndex', 'getFillValue', 'getind_latlon2d', - 'getVarDimNames', 'getVarFillValue', 'grib_stime2itime', - 'hyi2hyo_Wrap', 'ilapsF_Wrap', 'ilapsG_Wrap', 'ind_nearest_coord', - 'indStrSubset', 'int2dble', 'int2flt', 'int2p_n_Wrap', 'int2p_Wrap', - 'isMonotonic', 'isStrSubset', 'latGau', 'latGauWgt', 'latGlobeF', - 'latGlobeFo', 'latRegWgt', 'linint1_n_Wrap', 'linint1_Wrap', - 'linint2_points_Wrap', 'linint2_Wrap', 'local_max_1d', - 'local_min_1d', 'lonFlip', 'lonGlobeF', 'lonGlobeFo', 'lonPivot', - 'merge_levels_sfc', 'mod', 'month_to_annual', - 'month_to_annual_weighted', 'month_to_season', 'month_to_season12', - 'month_to_seasonN', 'monthly_total_to_daily_mean', 'nameDim', - 'natgrid_Wrap', 'NewCosWeight', 'niceLatLon2D', 'NormCosWgtGlobe', - 'numAsciiCol', 'numAsciiRow', 'numeric2int', - 'obj_anal_ic_deprecated', 'obj_anal_ic_Wrap', 'omega_ccm_driver', - 'omega_to_w', 'oneDtostring', 'pack_values', 'pattern_cor', 'pdfx', - 'pdfxy', 'pdfxy_conform', 'pot_temp', 'pot_vort_hybrid', - 'pot_vort_isobaric', 'pres2hybrid_Wrap', 'print_clock', - 'printMinMax', 'quadroots', 'rcm2points_Wrap', 'rcm2rgrid_Wrap', - 'readAsciiHead', 'readAsciiTable', 'reg_multlin_stats', - 'region_ind', 'regline_stats', 'relhum_ttd', 'replaceSingleChar', - 'RGBtoCmap', 'rgrid2rcm_Wrap', 'rho_mwjf', 'rm_single_dims', - 'rmAnnCycle1D', 'rmInsufData', 'rmMonAnnCycLLLT', 'rmMonAnnCycLLT', - 'rmMonAnnCycTLL', 'runave_n_Wrap', 'runave_Wrap', 'short2flt', - 'short2flt_hdf', 'shsgc_R42_Wrap', 'sign_f90', 'sign_matlab', - 'smth9_Wrap', 'smthClmDayTLL', 'smthClmDayTLLL', 'SqrtCosWeight', - 'stat_dispersion', 'static_stability', 'stdMonLLLT', 'stdMonLLT', - 'stdMonTLL', 'stdMonTLLL', 'symMinMaxPlt', 'table_attach_columns', - 'table_attach_rows', 'time_to_newtime', 'transpose', - 'triple2grid_Wrap', 'ut_convert', 'uv2dvF_Wrap', 'uv2dvG_Wrap', - 'uv2vrF_Wrap', 'uv2vrG_Wrap', 'vr2uvF_Wrap', 'vr2uvG_Wrap', - 'w_to_omega', 'wallClockElapseTime', 'wave_number_spc', - 'wgt_areaave_Wrap', 'wgt_runave_leftdim', 'wgt_runave_n_Wrap', - 'wgt_runave_Wrap', 'wgt_vertical_n', 'wind_component', - 'wind_direction', 'yyyyddd_to_yyyymmdd', 'yyyymm_time', - 'yyyymm_to_yyyyfrac', 'yyyymmdd_time', 'yyyymmdd_to_yyyyddd', - 'yyyymmdd_to_yyyyfrac', 'yyyymmddhh_time', 'yyyymmddhh_to_yyyyfrac', - 'zonal_mpsi_Wrap', 'zonalAve', 'calendar_decode2', 'cd_string', - 'kf_filter', 'run_cor', 'time_axis_labels', 'ut_string', - 'wrf_contour', 'wrf_map', 'wrf_map_overlay', 'wrf_map_overlays', - 'wrf_map_resources', 'wrf_map_zoom', 'wrf_overlay', 'wrf_overlays', - 'wrf_user_getvar', 'wrf_user_ij_to_ll', 'wrf_user_intrp2d', - 'wrf_user_intrp3d', 'wrf_user_latlon_to_ij', 'wrf_user_list_times', - 'wrf_user_ll_to_ij', 'wrf_user_unstagger', 'wrf_user_vert_interp', - 'wrf_vector', 'gsn_add_annotation', 'gsn_add_polygon', - 
'gsn_add_polyline', 'gsn_add_polymarker', - 'gsn_add_shapefile_polygons', 'gsn_add_shapefile_polylines', - 'gsn_add_shapefile_polymarkers', 'gsn_add_text', 'gsn_attach_plots', - 'gsn_blank_plot', 'gsn_contour', 'gsn_contour_map', - 'gsn_contour_shade', 'gsn_coordinates', 'gsn_create_labelbar', - 'gsn_create_legend', 'gsn_create_text', - 'gsn_csm_attach_zonal_means', 'gsn_csm_blank_plot', - 'gsn_csm_contour', 'gsn_csm_contour_map', 'gsn_csm_contour_map_ce', - 'gsn_csm_contour_map_overlay', 'gsn_csm_contour_map_polar', - 'gsn_csm_hov', 'gsn_csm_lat_time', 'gsn_csm_map', 'gsn_csm_map_ce', - 'gsn_csm_map_polar', 'gsn_csm_pres_hgt', - 'gsn_csm_pres_hgt_streamline', 'gsn_csm_pres_hgt_vector', - 'gsn_csm_streamline', 'gsn_csm_streamline_contour_map', - 'gsn_csm_streamline_contour_map_ce', - 'gsn_csm_streamline_contour_map_polar', 'gsn_csm_streamline_map', - 'gsn_csm_streamline_map_ce', 'gsn_csm_streamline_map_polar', - 'gsn_csm_streamline_scalar', 'gsn_csm_streamline_scalar_map', - 'gsn_csm_streamline_scalar_map_ce', - 'gsn_csm_streamline_scalar_map_polar', 'gsn_csm_time_lat', - 'gsn_csm_vector', 'gsn_csm_vector_map', 'gsn_csm_vector_map_ce', - 'gsn_csm_vector_map_polar', 'gsn_csm_vector_scalar', - 'gsn_csm_vector_scalar_map', 'gsn_csm_vector_scalar_map_ce', - 'gsn_csm_vector_scalar_map_polar', 'gsn_csm_x2y', 'gsn_csm_x2y2', - 'gsn_csm_xy', 'gsn_csm_xy2', 'gsn_csm_xy3', 'gsn_csm_y', - 'gsn_define_colormap', 'gsn_draw_colormap', 'gsn_draw_named_colors', - 'gsn_histogram', 'gsn_labelbar_ndc', 'gsn_legend_ndc', 'gsn_map', - 'gsn_merge_colormaps', 'gsn_open_wks', 'gsn_panel', 'gsn_polygon', - 'gsn_polygon_ndc', 'gsn_polyline', 'gsn_polyline_ndc', - 'gsn_polymarker', 'gsn_polymarker_ndc', 'gsn_retrieve_colormap', - 'gsn_reverse_colormap', 'gsn_streamline', 'gsn_streamline_map', - 'gsn_streamline_scalar', 'gsn_streamline_scalar_map', 'gsn_table', - 'gsn_text', 'gsn_text_ndc', 'gsn_vector', 'gsn_vector_map', - 'gsn_vector_scalar', 'gsn_vector_scalar_map', 'gsn_xy', 'gsn_y', - 'hsv2rgb', 'maximize_output', 'namedcolor2rgb', 'namedcolor2rgba', - 'reset_device_coordinates', 'span_named_colors'), prefix=r'\b'), - Name.Builtin), - - # Resources - (words(( - 'amDataXF', 'amDataYF', 'amJust', 'amOn', 'amOrthogonalPosF', - 'amParallelPosF', 'amResizeNotify', 'amSide', 'amTrackData', - 'amViewId', 'amZone', 'appDefaultParent', 'appFileSuffix', - 'appResources', 'appSysDir', 'appUsrDir', 'caCopyArrays', - 'caXArray', 'caXCast', 'caXMaxV', 'caXMinV', 'caXMissingV', - 'caYArray', 'caYCast', 'caYMaxV', 'caYMinV', 'caYMissingV', - 'cnCellFillEdgeColor', 'cnCellFillMissingValEdgeColor', - 'cnConpackParams', 'cnConstFEnableFill', 'cnConstFLabelAngleF', - 'cnConstFLabelBackgroundColor', 'cnConstFLabelConstantSpacingF', - 'cnConstFLabelFont', 'cnConstFLabelFontAspectF', - 'cnConstFLabelFontColor', 'cnConstFLabelFontHeightF', - 'cnConstFLabelFontQuality', 'cnConstFLabelFontThicknessF', - 'cnConstFLabelFormat', 'cnConstFLabelFuncCode', 'cnConstFLabelJust', - 'cnConstFLabelOn', 'cnConstFLabelOrthogonalPosF', - 'cnConstFLabelParallelPosF', 'cnConstFLabelPerimColor', - 'cnConstFLabelPerimOn', 'cnConstFLabelPerimSpaceF', - 'cnConstFLabelPerimThicknessF', 'cnConstFLabelSide', - 'cnConstFLabelString', 'cnConstFLabelTextDirection', - 'cnConstFLabelZone', 'cnConstFUseInfoLabelRes', - 'cnExplicitLabelBarLabelsOn', 'cnExplicitLegendLabelsOn', - 'cnExplicitLineLabelsOn', 'cnFillBackgroundColor', 'cnFillColor', - 'cnFillColors', 'cnFillDotSizeF', 'cnFillDrawOrder', 'cnFillMode', - 'cnFillOn', 'cnFillOpacityF', 'cnFillPalette', 
'cnFillPattern', - 'cnFillPatterns', 'cnFillScaleF', 'cnFillScales', 'cnFixFillBleed', - 'cnGridBoundFillColor', 'cnGridBoundFillPattern', - 'cnGridBoundFillScaleF', 'cnGridBoundPerimColor', - 'cnGridBoundPerimDashPattern', 'cnGridBoundPerimOn', - 'cnGridBoundPerimThicknessF', 'cnHighLabelAngleF', - 'cnHighLabelBackgroundColor', 'cnHighLabelConstantSpacingF', - 'cnHighLabelCount', 'cnHighLabelFont', 'cnHighLabelFontAspectF', - 'cnHighLabelFontColor', 'cnHighLabelFontHeightF', - 'cnHighLabelFontQuality', 'cnHighLabelFontThicknessF', - 'cnHighLabelFormat', 'cnHighLabelFuncCode', 'cnHighLabelPerimColor', - 'cnHighLabelPerimOn', 'cnHighLabelPerimSpaceF', - 'cnHighLabelPerimThicknessF', 'cnHighLabelString', 'cnHighLabelsOn', - 'cnHighLowLabelOverlapMode', 'cnHighUseLineLabelRes', - 'cnInfoLabelAngleF', 'cnInfoLabelBackgroundColor', - 'cnInfoLabelConstantSpacingF', 'cnInfoLabelFont', - 'cnInfoLabelFontAspectF', 'cnInfoLabelFontColor', - 'cnInfoLabelFontHeightF', 'cnInfoLabelFontQuality', - 'cnInfoLabelFontThicknessF', 'cnInfoLabelFormat', - 'cnInfoLabelFuncCode', 'cnInfoLabelJust', 'cnInfoLabelOn', - 'cnInfoLabelOrthogonalPosF', 'cnInfoLabelParallelPosF', - 'cnInfoLabelPerimColor', 'cnInfoLabelPerimOn', - 'cnInfoLabelPerimSpaceF', 'cnInfoLabelPerimThicknessF', - 'cnInfoLabelSide', 'cnInfoLabelString', 'cnInfoLabelTextDirection', - 'cnInfoLabelZone', 'cnLabelBarEndLabelsOn', 'cnLabelBarEndStyle', - 'cnLabelDrawOrder', 'cnLabelMasking', 'cnLabelScaleFactorF', - 'cnLabelScaleValueF', 'cnLabelScalingMode', 'cnLegendLevelFlags', - 'cnLevelCount', 'cnLevelFlag', 'cnLevelFlags', 'cnLevelSelectionMode', - 'cnLevelSpacingF', 'cnLevels', 'cnLineColor', 'cnLineColors', - 'cnLineDashPattern', 'cnLineDashPatterns', 'cnLineDashSegLenF', - 'cnLineDrawOrder', 'cnLineLabelAngleF', 'cnLineLabelBackgroundColor', - 'cnLineLabelConstantSpacingF', 'cnLineLabelCount', - 'cnLineLabelDensityF', 'cnLineLabelFont', 'cnLineLabelFontAspectF', - 'cnLineLabelFontColor', 'cnLineLabelFontColors', - 'cnLineLabelFontHeightF', 'cnLineLabelFontQuality', - 'cnLineLabelFontThicknessF', 'cnLineLabelFormat', - 'cnLineLabelFuncCode', 'cnLineLabelInterval', 'cnLineLabelPerimColor', - 'cnLineLabelPerimOn', 'cnLineLabelPerimSpaceF', - 'cnLineLabelPerimThicknessF', 'cnLineLabelPlacementMode', - 'cnLineLabelStrings', 'cnLineLabelsOn', 'cnLinePalette', - 'cnLineThicknessF', 'cnLineThicknesses', 'cnLinesOn', - 'cnLowLabelAngleF', 'cnLowLabelBackgroundColor', - 'cnLowLabelConstantSpacingF', 'cnLowLabelCount', 'cnLowLabelFont', - 'cnLowLabelFontAspectF', 'cnLowLabelFontColor', - 'cnLowLabelFontHeightF', 'cnLowLabelFontQuality', - 'cnLowLabelFontThicknessF', 'cnLowLabelFormat', 'cnLowLabelFuncCode', - 'cnLowLabelPerimColor', 'cnLowLabelPerimOn', 'cnLowLabelPerimSpaceF', - 'cnLowLabelPerimThicknessF', 'cnLowLabelString', 'cnLowLabelsOn', - 'cnLowUseHighLabelRes', 'cnMaxDataValueFormat', 'cnMaxLevelCount', - 'cnMaxLevelValF', 'cnMaxPointDistanceF', 'cnMinLevelValF', - 'cnMissingValFillColor', 'cnMissingValFillPattern', - 'cnMissingValFillScaleF', 'cnMissingValPerimColor', - 'cnMissingValPerimDashPattern', 'cnMissingValPerimGridBoundOn', - 'cnMissingValPerimOn', 'cnMissingValPerimThicknessF', - 'cnMonoFillColor', 'cnMonoFillPattern', 'cnMonoFillScale', - 'cnMonoLevelFlag', 'cnMonoLineColor', 'cnMonoLineDashPattern', - 'cnMonoLineLabelFontColor', 'cnMonoLineThickness', 'cnNoDataLabelOn', - 'cnNoDataLabelString', 'cnOutOfRangeFillColor', - 'cnOutOfRangeFillPattern', 'cnOutOfRangeFillScaleF', - 'cnOutOfRangePerimColor', 'cnOutOfRangePerimDashPattern', 
- 'cnOutOfRangePerimOn', 'cnOutOfRangePerimThicknessF', - 'cnRasterCellSizeF', 'cnRasterMinCellSizeF', 'cnRasterModeOn', - 'cnRasterSampleFactorF', 'cnRasterSmoothingOn', 'cnScalarFieldData', - 'cnSmoothingDistanceF', 'cnSmoothingOn', 'cnSmoothingTensionF', - 'cnSpanFillPalette', 'cnSpanLinePalette', 'ctCopyTables', - 'ctXElementSize', 'ctXMaxV', 'ctXMinV', 'ctXMissingV', 'ctXTable', - 'ctXTableLengths', 'ctXTableType', 'ctYElementSize', 'ctYMaxV', - 'ctYMinV', 'ctYMissingV', 'ctYTable', 'ctYTableLengths', - 'ctYTableType', 'dcDelayCompute', 'errBuffer', - 'errFileName', 'errFilePtr', 'errLevel', 'errPrint', 'errUnitNumber', - 'gsClipOn', 'gsColors', 'gsEdgeColor', 'gsEdgeDashPattern', - 'gsEdgeDashSegLenF', 'gsEdgeThicknessF', 'gsEdgesOn', - 'gsFillBackgroundColor', 'gsFillColor', 'gsFillDotSizeF', - 'gsFillIndex', 'gsFillLineThicknessF', 'gsFillOpacityF', - 'gsFillScaleF', 'gsFont', 'gsFontAspectF', 'gsFontColor', - 'gsFontHeightF', 'gsFontOpacityF', 'gsFontQuality', - 'gsFontThicknessF', 'gsLineColor', 'gsLineDashPattern', - 'gsLineDashSegLenF', 'gsLineLabelConstantSpacingF', 'gsLineLabelFont', - 'gsLineLabelFontAspectF', 'gsLineLabelFontColor', - 'gsLineLabelFontHeightF', 'gsLineLabelFontQuality', - 'gsLineLabelFontThicknessF', 'gsLineLabelFuncCode', - 'gsLineLabelString', 'gsLineOpacityF', 'gsLineThicknessF', - 'gsMarkerColor', 'gsMarkerIndex', 'gsMarkerOpacityF', 'gsMarkerSizeF', - 'gsMarkerThicknessF', 'gsSegments', 'gsTextAngleF', - 'gsTextConstantSpacingF', 'gsTextDirection', 'gsTextFuncCode', - 'gsTextJustification', 'gsnAboveYRefLineBarColors', - 'gsnAboveYRefLineBarFillScales', 'gsnAboveYRefLineBarPatterns', - 'gsnAboveYRefLineColor', 'gsnAddCyclic', 'gsnAttachBorderOn', - 'gsnAttachPlotsXAxis', 'gsnBelowYRefLineBarColors', - 'gsnBelowYRefLineBarFillScales', 'gsnBelowYRefLineBarPatterns', - 'gsnBelowYRefLineColor', 'gsnBoxMargin', 'gsnCenterString', - 'gsnCenterStringFontColor', 'gsnCenterStringFontHeightF', - 'gsnCenterStringFuncCode', 'gsnCenterStringOrthogonalPosF', - 'gsnCenterStringParallelPosF', 'gsnContourLineThicknessesScale', - 'gsnContourNegLineDashPattern', 'gsnContourPosLineDashPattern', - 'gsnContourZeroLineThicknessF', 'gsnDebugWriteFileName', 'gsnDraw', - 'gsnFrame', 'gsnHistogramBarWidthPercent', 'gsnHistogramBinIntervals', - 'gsnHistogramBinMissing', 'gsnHistogramBinWidth', - 'gsnHistogramClassIntervals', 'gsnHistogramCompare', - 'gsnHistogramComputePercentages', - 'gsnHistogramComputePercentagesNoMissing', - 'gsnHistogramDiscreteBinValues', 'gsnHistogramDiscreteClassValues', - 'gsnHistogramHorizontal', 'gsnHistogramMinMaxBinsOn', - 'gsnHistogramNumberOfBins', 'gsnHistogramPercentSign', - 'gsnHistogramSelectNiceIntervals', 'gsnLeftString', - 'gsnLeftStringFontColor', 'gsnLeftStringFontHeightF', - 'gsnLeftStringFuncCode', 'gsnLeftStringOrthogonalPosF', - 'gsnLeftStringParallelPosF', 'gsnMajorLatSpacing', - 'gsnMajorLonSpacing', 'gsnMaskLambertConformal', - 'gsnMaskLambertConformalOutlineOn', 'gsnMaximize', - 'gsnMinorLatSpacing', 'gsnMinorLonSpacing', 'gsnPanelBottom', - 'gsnPanelCenter', 'gsnPanelDebug', 'gsnPanelFigureStrings', - 'gsnPanelFigureStringsBackgroundFillColor', - 'gsnPanelFigureStringsFontHeightF', 'gsnPanelFigureStringsJust', - 'gsnPanelFigureStringsPerimOn', 'gsnPanelLabelBar', 'gsnPanelLeft', - 'gsnPanelMainFont', 'gsnPanelMainFontColor', - 'gsnPanelMainFontHeightF', 'gsnPanelMainString', 'gsnPanelRight', - 'gsnPanelRowSpec', 'gsnPanelScalePlotIndex', 'gsnPanelTop', - 'gsnPanelXF', 'gsnPanelXWhiteSpacePercent', 'gsnPanelYF', - 
'gsnPanelYWhiteSpacePercent', 'gsnPaperHeight', 'gsnPaperMargin', - 'gsnPaperOrientation', 'gsnPaperWidth', 'gsnPolar', - 'gsnPolarLabelDistance', 'gsnPolarLabelFont', - 'gsnPolarLabelFontHeightF', 'gsnPolarLabelSpacing', 'gsnPolarTime', - 'gsnPolarUT', 'gsnRightString', 'gsnRightStringFontColor', - 'gsnRightStringFontHeightF', 'gsnRightStringFuncCode', - 'gsnRightStringOrthogonalPosF', 'gsnRightStringParallelPosF', - 'gsnScalarContour', 'gsnScale', 'gsnShape', 'gsnSpreadColorEnd', - 'gsnSpreadColorStart', 'gsnSpreadColors', 'gsnStringFont', - 'gsnStringFontColor', 'gsnStringFontHeightF', 'gsnStringFuncCode', - 'gsnTickMarksOn', 'gsnXAxisIrregular2Linear', 'gsnXAxisIrregular2Log', - 'gsnXRefLine', 'gsnXRefLineColor', 'gsnXRefLineDashPattern', - 'gsnXRefLineThicknessF', 'gsnXYAboveFillColors', 'gsnXYBarChart', - 'gsnXYBarChartBarWidth', 'gsnXYBarChartColors', - 'gsnXYBarChartColors2', 'gsnXYBarChartFillDotSizeF', - 'gsnXYBarChartFillLineThicknessF', 'gsnXYBarChartFillOpacityF', - 'gsnXYBarChartFillScaleF', 'gsnXYBarChartOutlineOnly', - 'gsnXYBarChartOutlineThicknessF', 'gsnXYBarChartPatterns', - 'gsnXYBarChartPatterns2', 'gsnXYBelowFillColors', 'gsnXYFillColors', - 'gsnXYFillOpacities', 'gsnXYLeftFillColors', 'gsnXYRightFillColors', - 'gsnYAxisIrregular2Linear', 'gsnYAxisIrregular2Log', 'gsnYRefLine', - 'gsnYRefLineColor', 'gsnYRefLineColors', 'gsnYRefLineDashPattern', - 'gsnYRefLineDashPatterns', 'gsnYRefLineThicknessF', - 'gsnYRefLineThicknesses', 'gsnZonalMean', 'gsnZonalMeanXMaxF', - 'gsnZonalMeanXMinF', 'gsnZonalMeanYRefLine', 'lbAutoManage', - 'lbBottomMarginF', 'lbBoxCount', 'lbBoxEndCapStyle', 'lbBoxFractions', - 'lbBoxLineColor', 'lbBoxLineDashPattern', 'lbBoxLineDashSegLenF', - 'lbBoxLineThicknessF', 'lbBoxLinesOn', 'lbBoxMajorExtentF', - 'lbBoxMinorExtentF', 'lbBoxSeparatorLinesOn', 'lbBoxSizing', - 'lbFillBackground', 'lbFillColor', 'lbFillColors', 'lbFillDotSizeF', - 'lbFillLineThicknessF', 'lbFillPattern', 'lbFillPatterns', - 'lbFillScaleF', 'lbFillScales', 'lbJustification', 'lbLabelAlignment', - 'lbLabelAngleF', 'lbLabelAutoStride', 'lbLabelBarOn', - 'lbLabelConstantSpacingF', 'lbLabelDirection', 'lbLabelFont', - 'lbLabelFontAspectF', 'lbLabelFontColor', 'lbLabelFontHeightF', - 'lbLabelFontQuality', 'lbLabelFontThicknessF', 'lbLabelFuncCode', - 'lbLabelJust', 'lbLabelOffsetF', 'lbLabelPosition', 'lbLabelStride', - 'lbLabelStrings', 'lbLabelsOn', 'lbLeftMarginF', 'lbMaxLabelLenF', - 'lbMinLabelSpacingF', 'lbMonoFillColor', 'lbMonoFillPattern', - 'lbMonoFillScale', 'lbOrientation', 'lbPerimColor', - 'lbPerimDashPattern', 'lbPerimDashSegLenF', 'lbPerimFill', - 'lbPerimFillColor', 'lbPerimOn', 'lbPerimThicknessF', - 'lbRasterFillOn', 'lbRightMarginF', 'lbTitleAngleF', - 'lbTitleConstantSpacingF', 'lbTitleDirection', 'lbTitleExtentF', - 'lbTitleFont', 'lbTitleFontAspectF', 'lbTitleFontColor', - 'lbTitleFontHeightF', 'lbTitleFontQuality', 'lbTitleFontThicknessF', - 'lbTitleFuncCode', 'lbTitleJust', 'lbTitleOffsetF', 'lbTitleOn', - 'lbTitlePosition', 'lbTitleString', 'lbTopMarginF', 'lgAutoManage', - 'lgBottomMarginF', 'lgBoxBackground', 'lgBoxLineColor', - 'lgBoxLineDashPattern', 'lgBoxLineDashSegLenF', 'lgBoxLineThicknessF', - 'lgBoxLinesOn', 'lgBoxMajorExtentF', 'lgBoxMinorExtentF', - 'lgDashIndex', 'lgDashIndexes', 'lgItemCount', 'lgItemOrder', - 'lgItemPlacement', 'lgItemPositions', 'lgItemType', 'lgItemTypes', - 'lgJustification', 'lgLabelAlignment', 'lgLabelAngleF', - 'lgLabelAutoStride', 'lgLabelConstantSpacingF', 'lgLabelDirection', - 'lgLabelFont', 'lgLabelFontAspectF', 
'lgLabelFontColor', - 'lgLabelFontHeightF', 'lgLabelFontQuality', 'lgLabelFontThicknessF', - 'lgLabelFuncCode', 'lgLabelJust', 'lgLabelOffsetF', 'lgLabelPosition', - 'lgLabelStride', 'lgLabelStrings', 'lgLabelsOn', 'lgLeftMarginF', - 'lgLegendOn', 'lgLineColor', 'lgLineColors', 'lgLineDashSegLenF', - 'lgLineDashSegLens', 'lgLineLabelConstantSpacingF', 'lgLineLabelFont', - 'lgLineLabelFontAspectF', 'lgLineLabelFontColor', - 'lgLineLabelFontColors', 'lgLineLabelFontHeightF', - 'lgLineLabelFontHeights', 'lgLineLabelFontQuality', - 'lgLineLabelFontThicknessF', 'lgLineLabelFuncCode', - 'lgLineLabelStrings', 'lgLineLabelsOn', 'lgLineThicknessF', - 'lgLineThicknesses', 'lgMarkerColor', 'lgMarkerColors', - 'lgMarkerIndex', 'lgMarkerIndexes', 'lgMarkerSizeF', 'lgMarkerSizes', - 'lgMarkerThicknessF', 'lgMarkerThicknesses', 'lgMonoDashIndex', - 'lgMonoItemType', 'lgMonoLineColor', 'lgMonoLineDashSegLen', - 'lgMonoLineLabelFontColor', 'lgMonoLineLabelFontHeight', - 'lgMonoLineThickness', 'lgMonoMarkerColor', 'lgMonoMarkerIndex', - 'lgMonoMarkerSize', 'lgMonoMarkerThickness', 'lgOrientation', - 'lgPerimColor', 'lgPerimDashPattern', 'lgPerimDashSegLenF', - 'lgPerimFill', 'lgPerimFillColor', 'lgPerimOn', 'lgPerimThicknessF', - 'lgRightMarginF', 'lgTitleAngleF', 'lgTitleConstantSpacingF', - 'lgTitleDirection', 'lgTitleExtentF', 'lgTitleFont', - 'lgTitleFontAspectF', 'lgTitleFontColor', 'lgTitleFontHeightF', - 'lgTitleFontQuality', 'lgTitleFontThicknessF', 'lgTitleFuncCode', - 'lgTitleJust', 'lgTitleOffsetF', 'lgTitleOn', 'lgTitlePosition', - 'lgTitleString', 'lgTopMarginF', 'mpAreaGroupCount', - 'mpAreaMaskingOn', 'mpAreaNames', 'mpAreaTypes', 'mpBottomAngleF', - 'mpBottomMapPosF', 'mpBottomNDCF', 'mpBottomNPCF', - 'mpBottomPointLatF', 'mpBottomPointLonF', 'mpBottomWindowF', - 'mpCenterLatF', 'mpCenterLonF', 'mpCenterRotF', 'mpCountyLineColor', - 'mpCountyLineDashPattern', 'mpCountyLineDashSegLenF', - 'mpCountyLineThicknessF', 'mpDataBaseVersion', 'mpDataResolution', - 'mpDataSetName', 'mpDefaultFillColor', 'mpDefaultFillPattern', - 'mpDefaultFillScaleF', 'mpDynamicAreaGroups', 'mpEllipticalBoundary', - 'mpFillAreaSpecifiers', 'mpFillBoundarySets', 'mpFillColor', - 'mpFillColors', 'mpFillColors-default', 'mpFillDotSizeF', - 'mpFillDrawOrder', 'mpFillOn', 'mpFillPatternBackground', - 'mpFillPattern', 'mpFillPatterns', 'mpFillPatterns-default', - 'mpFillScaleF', 'mpFillScales', 'mpFillScales-default', - 'mpFixedAreaGroups', 'mpGeophysicalLineColor', - 'mpGeophysicalLineDashPattern', 'mpGeophysicalLineDashSegLenF', - 'mpGeophysicalLineThicknessF', 'mpGreatCircleLinesOn', - 'mpGridAndLimbDrawOrder', 'mpGridAndLimbOn', 'mpGridLatSpacingF', - 'mpGridLineColor', 'mpGridLineDashPattern', 'mpGridLineDashSegLenF', - 'mpGridLineThicknessF', 'mpGridLonSpacingF', 'mpGridMaskMode', - 'mpGridMaxLatF', 'mpGridPolarLonSpacingF', 'mpGridSpacingF', - 'mpInlandWaterFillColor', 'mpInlandWaterFillPattern', - 'mpInlandWaterFillScaleF', 'mpLabelDrawOrder', 'mpLabelFontColor', - 'mpLabelFontHeightF', 'mpLabelsOn', 'mpLambertMeridianF', - 'mpLambertParallel1F', 'mpLambertParallel2F', 'mpLandFillColor', - 'mpLandFillPattern', 'mpLandFillScaleF', 'mpLeftAngleF', - 'mpLeftCornerLatF', 'mpLeftCornerLonF', 'mpLeftMapPosF', - 'mpLeftNDCF', 'mpLeftNPCF', 'mpLeftPointLatF', - 'mpLeftPointLonF', 'mpLeftWindowF', 'mpLimbLineColor', - 'mpLimbLineDashPattern', 'mpLimbLineDashSegLenF', - 'mpLimbLineThicknessF', 'mpLimitMode', 'mpMaskAreaSpecifiers', - 'mpMaskOutlineSpecifiers', 'mpMaxLatF', 'mpMaxLonF', - 'mpMinLatF', 'mpMinLonF', 
'mpMonoFillColor', 'mpMonoFillPattern', - 'mpMonoFillScale', 'mpNationalLineColor', 'mpNationalLineDashPattern', - 'mpNationalLineThicknessF', 'mpOceanFillColor', 'mpOceanFillPattern', - 'mpOceanFillScaleF', 'mpOutlineBoundarySets', 'mpOutlineDrawOrder', - 'mpOutlineMaskingOn', 'mpOutlineOn', 'mpOutlineSpecifiers', - 'mpPerimDrawOrder', 'mpPerimLineColor', 'mpPerimLineDashPattern', - 'mpPerimLineDashSegLenF', 'mpPerimLineThicknessF', 'mpPerimOn', - 'mpPolyMode', 'mpProjection', 'mpProvincialLineColor', - 'mpProvincialLineDashPattern', 'mpProvincialLineDashSegLenF', - 'mpProvincialLineThicknessF', 'mpRelativeCenterLat', - 'mpRelativeCenterLon', 'mpRightAngleF', 'mpRightCornerLatF', - 'mpRightCornerLonF', 'mpRightMapPosF', 'mpRightNDCF', - 'mpRightNPCF', 'mpRightPointLatF', 'mpRightPointLonF', - 'mpRightWindowF', 'mpSatelliteAngle1F', 'mpSatelliteAngle2F', - 'mpSatelliteDistF', 'mpShapeMode', 'mpSpecifiedFillColors', - 'mpSpecifiedFillDirectIndexing', 'mpSpecifiedFillPatterns', - 'mpSpecifiedFillPriority', 'mpSpecifiedFillScales', - 'mpTopAngleF', 'mpTopMapPosF', 'mpTopNDCF', 'mpTopNPCF', - 'mpTopPointLatF', 'mpTopPointLonF', 'mpTopWindowF', - 'mpUSStateLineColor', 'mpUSStateLineDashPattern', - 'mpUSStateLineDashSegLenF', 'mpUSStateLineThicknessF', - 'pmAnnoManagers', 'pmAnnoViews', 'pmLabelBarDisplayMode', - 'pmLabelBarHeightF', 'pmLabelBarKeepAspect', 'pmLabelBarOrthogonalPosF', - 'pmLabelBarParallelPosF', 'pmLabelBarSide', 'pmLabelBarWidthF', - 'pmLabelBarZone', 'pmLegendDisplayMode', 'pmLegendHeightF', - 'pmLegendKeepAspect', 'pmLegendOrthogonalPosF', - 'pmLegendParallelPosF', 'pmLegendSide', 'pmLegendWidthF', - 'pmLegendZone', 'pmOverlaySequenceIds', 'pmTickMarkDisplayMode', - 'pmTickMarkZone', 'pmTitleDisplayMode', 'pmTitleZone', - 'prGraphicStyle', 'prPolyType', 'prXArray', 'prYArray', - 'sfCopyData', 'sfDataArray', 'sfDataMaxV', 'sfDataMinV', - 'sfElementNodes', 'sfExchangeDimensions', 'sfFirstNodeIndex', - 'sfMissingValueV', 'sfXArray', 'sfXCActualEndF', 'sfXCActualStartF', - 'sfXCEndIndex', 'sfXCEndSubsetV', 'sfXCEndV', 'sfXCStartIndex', - 'sfXCStartSubsetV', 'sfXCStartV', 'sfXCStride', 'sfXCellBounds', - 'sfYArray', 'sfYCActualEndF', 'sfYCActualStartF', 'sfYCEndIndex', - 'sfYCEndSubsetV', 'sfYCEndV', 'sfYCStartIndex', 'sfYCStartSubsetV', - 'sfYCStartV', 'sfYCStride', 'sfYCellBounds', 'stArrowLengthF', - 'stArrowStride', 'stCrossoverCheckCount', - 'stExplicitLabelBarLabelsOn', 'stLabelBarEndLabelsOn', - 'stLabelFormat', 'stLengthCheckCount', 'stLevelColors', - 'stLevelCount', 'stLevelPalette', 'stLevelSelectionMode', - 'stLevelSpacingF', 'stLevels', 'stLineColor', 'stLineOpacityF', - 'stLineStartStride', 'stLineThicknessF', 'stMapDirection', - 'stMaxLevelCount', 'stMaxLevelValF', 'stMinArrowSpacingF', - 'stMinDistanceF', 'stMinLevelValF', 'stMinLineSpacingF', - 'stMinStepFactorF', 'stMonoLineColor', 'stNoDataLabelOn', - 'stNoDataLabelString', 'stScalarFieldData', 'stScalarMissingValColor', - 'stSpanLevelPalette', 'stStepSizeF', 'stStreamlineDrawOrder', - 'stUseScalarArray', 'stVectorFieldData', 'stZeroFLabelAngleF', - 'stZeroFLabelBackgroundColor', 'stZeroFLabelConstantSpacingF', - 'stZeroFLabelFont', 'stZeroFLabelFontAspectF', - 'stZeroFLabelFontColor', 'stZeroFLabelFontHeightF', - 'stZeroFLabelFontQuality', 'stZeroFLabelFontThicknessF', - 'stZeroFLabelFuncCode', 'stZeroFLabelJust', 'stZeroFLabelOn', - 'stZeroFLabelOrthogonalPosF', 'stZeroFLabelParallelPosF', - 'stZeroFLabelPerimColor', 'stZeroFLabelPerimOn', - 'stZeroFLabelPerimSpaceF', 'stZeroFLabelPerimThicknessF', - 
'stZeroFLabelSide', 'stZeroFLabelString', 'stZeroFLabelTextDirection', - 'stZeroFLabelZone', 'tfDoNDCOverlay', 'tfPlotManagerOn', - 'tfPolyDrawList', 'tfPolyDrawOrder', 'tiDeltaF', 'tiMainAngleF', - 'tiMainConstantSpacingF', 'tiMainDirection', 'tiMainFont', - 'tiMainFontAspectF', 'tiMainFontColor', 'tiMainFontHeightF', - 'tiMainFontQuality', 'tiMainFontThicknessF', 'tiMainFuncCode', - 'tiMainJust', 'tiMainOffsetXF', 'tiMainOffsetYF', 'tiMainOn', - 'tiMainPosition', 'tiMainSide', 'tiMainString', 'tiUseMainAttributes', - 'tiXAxisAngleF', 'tiXAxisConstantSpacingF', 'tiXAxisDirection', - 'tiXAxisFont', 'tiXAxisFontAspectF', 'tiXAxisFontColor', - 'tiXAxisFontHeightF', 'tiXAxisFontQuality', 'tiXAxisFontThicknessF', - 'tiXAxisFuncCode', 'tiXAxisJust', 'tiXAxisOffsetXF', - 'tiXAxisOffsetYF', 'tiXAxisOn', 'tiXAxisPosition', 'tiXAxisSide', - 'tiXAxisString', 'tiYAxisAngleF', 'tiYAxisConstantSpacingF', - 'tiYAxisDirection', 'tiYAxisFont', 'tiYAxisFontAspectF', - 'tiYAxisFontColor', 'tiYAxisFontHeightF', 'tiYAxisFontQuality', - 'tiYAxisFontThicknessF', 'tiYAxisFuncCode', 'tiYAxisJust', - 'tiYAxisOffsetXF', 'tiYAxisOffsetYF', 'tiYAxisOn', 'tiYAxisPosition', - 'tiYAxisSide', 'tiYAxisString', 'tmBorderLineColor', - 'tmBorderThicknessF', 'tmEqualizeXYSizes', 'tmLabelAutoStride', - 'tmSciNoteCutoff', 'tmXBAutoPrecision', 'tmXBBorderOn', - 'tmXBDataLeftF', 'tmXBDataRightF', 'tmXBFormat', 'tmXBIrrTensionF', - 'tmXBIrregularPoints', 'tmXBLabelAngleF', 'tmXBLabelConstantSpacingF', - 'tmXBLabelDeltaF', 'tmXBLabelDirection', 'tmXBLabelFont', - 'tmXBLabelFontAspectF', 'tmXBLabelFontColor', 'tmXBLabelFontHeightF', - 'tmXBLabelFontQuality', 'tmXBLabelFontThicknessF', - 'tmXBLabelFuncCode', 'tmXBLabelJust', 'tmXBLabelStride', 'tmXBLabels', - 'tmXBLabelsOn', 'tmXBMajorLengthF', 'tmXBMajorLineColor', - 'tmXBMajorOutwardLengthF', 'tmXBMajorThicknessF', 'tmXBMaxLabelLenF', - 'tmXBMaxTicks', 'tmXBMinLabelSpacingF', 'tmXBMinorLengthF', - 'tmXBMinorLineColor', 'tmXBMinorOn', 'tmXBMinorOutwardLengthF', - 'tmXBMinorPerMajor', 'tmXBMinorThicknessF', 'tmXBMinorValues', - 'tmXBMode', 'tmXBOn', 'tmXBPrecision', 'tmXBStyle', 'tmXBTickEndF', - 'tmXBTickSpacingF', 'tmXBTickStartF', 'tmXBValues', 'tmXMajorGrid', - 'tmXMajorGridLineColor', 'tmXMajorGridLineDashPattern', - 'tmXMajorGridThicknessF', 'tmXMinorGrid', 'tmXMinorGridLineColor', - 'tmXMinorGridLineDashPattern', 'tmXMinorGridThicknessF', - 'tmXTAutoPrecision', 'tmXTBorderOn', 'tmXTDataLeftF', - 'tmXTDataRightF', 'tmXTFormat', 'tmXTIrrTensionF', - 'tmXTIrregularPoints', 'tmXTLabelAngleF', 'tmXTLabelConstantSpacingF', - 'tmXTLabelDeltaF', 'tmXTLabelDirection', 'tmXTLabelFont', - 'tmXTLabelFontAspectF', 'tmXTLabelFontColor', 'tmXTLabelFontHeightF', - 'tmXTLabelFontQuality', 'tmXTLabelFontThicknessF', - 'tmXTLabelFuncCode', 'tmXTLabelJust', 'tmXTLabelStride', 'tmXTLabels', - 'tmXTLabelsOn', 'tmXTMajorLengthF', 'tmXTMajorLineColor', - 'tmXTMajorOutwardLengthF', 'tmXTMajorThicknessF', 'tmXTMaxLabelLenF', - 'tmXTMaxTicks', 'tmXTMinLabelSpacingF', 'tmXTMinorLengthF', - 'tmXTMinorLineColor', 'tmXTMinorOn', 'tmXTMinorOutwardLengthF', - 'tmXTMinorPerMajor', 'tmXTMinorThicknessF', 'tmXTMinorValues', - 'tmXTMode', 'tmXTOn', 'tmXTPrecision', 'tmXTStyle', 'tmXTTickEndF', - 'tmXTTickSpacingF', 'tmXTTickStartF', 'tmXTValues', 'tmXUseBottom', - 'tmYLAutoPrecision', 'tmYLBorderOn', 'tmYLDataBottomF', - 'tmYLDataTopF', 'tmYLFormat', 'tmYLIrrTensionF', - 'tmYLIrregularPoints', 'tmYLLabelAngleF', 'tmYLLabelConstantSpacingF', - 'tmYLLabelDeltaF', 'tmYLLabelDirection', 'tmYLLabelFont', - 
'tmYLLabelFontAspectF', 'tmYLLabelFontColor', 'tmYLLabelFontHeightF', - 'tmYLLabelFontQuality', 'tmYLLabelFontThicknessF', - 'tmYLLabelFuncCode', 'tmYLLabelJust', 'tmYLLabelStride', 'tmYLLabels', - 'tmYLLabelsOn', 'tmYLMajorLengthF', 'tmYLMajorLineColor', - 'tmYLMajorOutwardLengthF', 'tmYLMajorThicknessF', 'tmYLMaxLabelLenF', - 'tmYLMaxTicks', 'tmYLMinLabelSpacingF', 'tmYLMinorLengthF', - 'tmYLMinorLineColor', 'tmYLMinorOn', 'tmYLMinorOutwardLengthF', - 'tmYLMinorPerMajor', 'tmYLMinorThicknessF', 'tmYLMinorValues', - 'tmYLMode', 'tmYLOn', 'tmYLPrecision', 'tmYLStyle', 'tmYLTickEndF', - 'tmYLTickSpacingF', 'tmYLTickStartF', 'tmYLValues', 'tmYMajorGrid', - 'tmYMajorGridLineColor', 'tmYMajorGridLineDashPattern', - 'tmYMajorGridThicknessF', 'tmYMinorGrid', 'tmYMinorGridLineColor', - 'tmYMinorGridLineDashPattern', 'tmYMinorGridThicknessF', - 'tmYRAutoPrecision', 'tmYRBorderOn', 'tmYRDataBottomF', - 'tmYRDataTopF', 'tmYRFormat', 'tmYRIrrTensionF', - 'tmYRIrregularPoints', 'tmYRLabelAngleF', 'tmYRLabelConstantSpacingF', - 'tmYRLabelDeltaF', 'tmYRLabelDirection', 'tmYRLabelFont', - 'tmYRLabelFontAspectF', 'tmYRLabelFontColor', 'tmYRLabelFontHeightF', - 'tmYRLabelFontQuality', 'tmYRLabelFontThicknessF', - 'tmYRLabelFuncCode', 'tmYRLabelJust', 'tmYRLabelStride', 'tmYRLabels', - 'tmYRLabelsOn', 'tmYRMajorLengthF', 'tmYRMajorLineColor', - 'tmYRMajorOutwardLengthF', 'tmYRMajorThicknessF', 'tmYRMaxLabelLenF', - 'tmYRMaxTicks', 'tmYRMinLabelSpacingF', 'tmYRMinorLengthF', - 'tmYRMinorLineColor', 'tmYRMinorOn', 'tmYRMinorOutwardLengthF', - 'tmYRMinorPerMajor', 'tmYRMinorThicknessF', 'tmYRMinorValues', - 'tmYRMode', 'tmYROn', 'tmYRPrecision', 'tmYRStyle', 'tmYRTickEndF', - 'tmYRTickSpacingF', 'tmYRTickStartF', 'tmYRValues', 'tmYUseLeft', - 'trGridType', 'trLineInterpolationOn', - 'trXAxisType', 'trXCoordPoints', 'trXInterPoints', 'trXLog', - 'trXMaxF', 'trXMinF', 'trXReverse', 'trXSamples', 'trXTensionF', - 'trYAxisType', 'trYCoordPoints', 'trYInterPoints', 'trYLog', - 'trYMaxF', 'trYMinF', 'trYReverse', 'trYSamples', 'trYTensionF', - 'txAngleF', 'txBackgroundFillColor', 'txConstantSpacingF', 'txDirection', - 'txFont', 'HLU-Fonts', 'txFontAspectF', 'txFontColor', - 'txFontHeightF', 'txFontOpacityF', 'txFontQuality', - 'txFontThicknessF', 'txFuncCode', 'txJust', 'txPerimColor', - 'txPerimDashLengthF', 'txPerimDashPattern', 'txPerimOn', - 'txPerimSpaceF', 'txPerimThicknessF', 'txPosXF', 'txPosYF', - 'txString', 'vcExplicitLabelBarLabelsOn', 'vcFillArrowEdgeColor', - 'vcFillArrowEdgeThicknessF', 'vcFillArrowFillColor', - 'vcFillArrowHeadInteriorXF', 'vcFillArrowHeadMinFracXF', - 'vcFillArrowHeadMinFracYF', 'vcFillArrowHeadXF', 'vcFillArrowHeadYF', - 'vcFillArrowMinFracWidthF', 'vcFillArrowWidthF', 'vcFillArrowsOn', - 'vcFillOverEdge', 'vcGlyphOpacityF', 'vcGlyphStyle', - 'vcLabelBarEndLabelsOn', 'vcLabelFontColor', 'vcLabelFontHeightF', - 'vcLabelsOn', 'vcLabelsUseVectorColor', 'vcLevelColors', - 'vcLevelCount', 'vcLevelPalette', 'vcLevelSelectionMode', - 'vcLevelSpacingF', 'vcLevels', 'vcLineArrowColor', - 'vcLineArrowHeadMaxSizeF', 'vcLineArrowHeadMinSizeF', - 'vcLineArrowThicknessF', 'vcMagnitudeFormat', - 'vcMagnitudeScaleFactorF', 'vcMagnitudeScaleValueF', - 'vcMagnitudeScalingMode', 'vcMapDirection', 'vcMaxLevelCount', - 'vcMaxLevelValF', 'vcMaxMagnitudeF', 'vcMinAnnoAngleF', - 'vcMinAnnoArrowAngleF', 'vcMinAnnoArrowEdgeColor', - 'vcMinAnnoArrowFillColor', 'vcMinAnnoArrowLineColor', - 'vcMinAnnoArrowMinOffsetF', 'vcMinAnnoArrowSpaceF', - 'vcMinAnnoArrowUseVecColor', 'vcMinAnnoBackgroundColor', - 
'vcMinAnnoConstantSpacingF', 'vcMinAnnoExplicitMagnitudeF', - 'vcMinAnnoFont', 'vcMinAnnoFontAspectF', 'vcMinAnnoFontColor', - 'vcMinAnnoFontHeightF', 'vcMinAnnoFontQuality', - 'vcMinAnnoFontThicknessF', 'vcMinAnnoFuncCode', 'vcMinAnnoJust', - 'vcMinAnnoOn', 'vcMinAnnoOrientation', 'vcMinAnnoOrthogonalPosF', - 'vcMinAnnoParallelPosF', 'vcMinAnnoPerimColor', 'vcMinAnnoPerimOn', - 'vcMinAnnoPerimSpaceF', 'vcMinAnnoPerimThicknessF', 'vcMinAnnoSide', - 'vcMinAnnoString1', 'vcMinAnnoString1On', 'vcMinAnnoString2', - 'vcMinAnnoString2On', 'vcMinAnnoTextDirection', 'vcMinAnnoZone', - 'vcMinDistanceF', 'vcMinFracLengthF', 'vcMinLevelValF', - 'vcMinMagnitudeF', 'vcMonoFillArrowEdgeColor', - 'vcMonoFillArrowFillColor', 'vcMonoLineArrowColor', - 'vcMonoWindBarbColor', 'vcNoDataLabelOn', 'vcNoDataLabelString', - 'vcPositionMode', 'vcRefAnnoAngleF', 'vcRefAnnoArrowAngleF', - 'vcRefAnnoArrowEdgeColor', 'vcRefAnnoArrowFillColor', - 'vcRefAnnoArrowLineColor', 'vcRefAnnoArrowMinOffsetF', - 'vcRefAnnoArrowSpaceF', 'vcRefAnnoArrowUseVecColor', - 'vcRefAnnoBackgroundColor', 'vcRefAnnoConstantSpacingF', - 'vcRefAnnoExplicitMagnitudeF', 'vcRefAnnoFont', - 'vcRefAnnoFontAspectF', 'vcRefAnnoFontColor', 'vcRefAnnoFontHeightF', - 'vcRefAnnoFontQuality', 'vcRefAnnoFontThicknessF', - 'vcRefAnnoFuncCode', 'vcRefAnnoJust', 'vcRefAnnoOn', - 'vcRefAnnoOrientation', 'vcRefAnnoOrthogonalPosF', - 'vcRefAnnoParallelPosF', 'vcRefAnnoPerimColor', 'vcRefAnnoPerimOn', - 'vcRefAnnoPerimSpaceF', 'vcRefAnnoPerimThicknessF', 'vcRefAnnoSide', - 'vcRefAnnoString1', 'vcRefAnnoString1On', 'vcRefAnnoString2', - 'vcRefAnnoString2On', 'vcRefAnnoTextDirection', 'vcRefAnnoZone', - 'vcRefLengthF', 'vcRefMagnitudeF', 'vcScalarFieldData', - 'vcScalarMissingValColor', 'vcScalarValueFormat', - 'vcScalarValueScaleFactorF', 'vcScalarValueScaleValueF', - 'vcScalarValueScalingMode', 'vcSpanLevelPalette', 'vcUseRefAnnoRes', - 'vcUseScalarArray', 'vcVectorDrawOrder', 'vcVectorFieldData', - 'vcWindBarbCalmCircleSizeF', 'vcWindBarbColor', - 'vcWindBarbLineThicknessF', 'vcWindBarbScaleFactorF', - 'vcWindBarbTickAngleF', 'vcWindBarbTickLengthF', - 'vcWindBarbTickSpacingF', 'vcZeroFLabelAngleF', - 'vcZeroFLabelBackgroundColor', 'vcZeroFLabelConstantSpacingF', - 'vcZeroFLabelFont', 'vcZeroFLabelFontAspectF', - 'vcZeroFLabelFontColor', 'vcZeroFLabelFontHeightF', - 'vcZeroFLabelFontQuality', 'vcZeroFLabelFontThicknessF', - 'vcZeroFLabelFuncCode', 'vcZeroFLabelJust', 'vcZeroFLabelOn', - 'vcZeroFLabelOrthogonalPosF', 'vcZeroFLabelParallelPosF', - 'vcZeroFLabelPerimColor', 'vcZeroFLabelPerimOn', - 'vcZeroFLabelPerimSpaceF', 'vcZeroFLabelPerimThicknessF', - 'vcZeroFLabelSide', 'vcZeroFLabelString', 'vcZeroFLabelTextDirection', - 'vcZeroFLabelZone', 'vfCopyData', 'vfDataArray', - 'vfExchangeDimensions', 'vfExchangeUVData', 'vfMagMaxV', 'vfMagMinV', - 'vfMissingUValueV', 'vfMissingVValueV', 'vfPolarData', - 'vfSingleMissingValue', 'vfUDataArray', 'vfUMaxV', 'vfUMinV', - 'vfVDataArray', 'vfVMaxV', 'vfVMinV', 'vfXArray', 'vfXCActualEndF', - 'vfXCActualStartF', 'vfXCEndIndex', 'vfXCEndSubsetV', 'vfXCEndV', - 'vfXCStartIndex', 'vfXCStartSubsetV', 'vfXCStartV', 'vfXCStride', - 'vfYArray', 'vfYCActualEndF', 'vfYCActualStartF', 'vfYCEndIndex', - 'vfYCEndSubsetV', 'vfYCEndV', 'vfYCStartIndex', 'vfYCStartSubsetV', - 'vfYCStartV', 'vfYCStride', 'vpAnnoManagerId', 'vpClipOn', - 'vpHeightF', 'vpKeepAspect', 'vpOn', 'vpUseSegments', 'vpWidthF', - 'vpXF', 'vpYF', 'wkAntiAlias', 'wkBackgroundColor', 'wkBackgroundOpacityF', - 'wkColorMapLen', 'wkColorMap', 'wkColorModel', 
'wkDashTableLength', - 'wkDefGraphicStyleId', 'wkDeviceLowerX', 'wkDeviceLowerY', - 'wkDeviceUpperX', 'wkDeviceUpperY', 'wkFileName', 'wkFillTableLength', - 'wkForegroundColor', 'wkFormat', 'wkFullBackground', 'wkGksWorkId', - 'wkHeight', 'wkMarkerTableLength', 'wkMetaName', 'wkOrientation', - 'wkPDFFileName', 'wkPDFFormat', 'wkPDFResolution', 'wkPSFileName', - 'wkPSFormat', 'wkPSResolution', 'wkPaperHeightF', 'wkPaperSize', - 'wkPaperWidthF', 'wkPause', 'wkTopLevelViews', 'wkViews', - 'wkVisualType', 'wkWidth', 'wkWindowId', 'wkXColorMode', 'wsCurrentSize', - 'wsMaximumSize', 'wsThresholdSize', 'xyComputeXMax', - 'xyComputeXMin', 'xyComputeYMax', 'xyComputeYMin', 'xyCoordData', - 'xyCoordDataSpec', 'xyCurveDrawOrder', 'xyDashPattern', - 'xyDashPatterns', 'xyExplicitLabels', 'xyExplicitLegendLabels', - 'xyLabelMode', 'xyLineColor', 'xyLineColors', 'xyLineDashSegLenF', - 'xyLineLabelConstantSpacingF', 'xyLineLabelFont', - 'xyLineLabelFontAspectF', 'xyLineLabelFontColor', - 'xyLineLabelFontColors', 'xyLineLabelFontHeightF', - 'xyLineLabelFontQuality', 'xyLineLabelFontThicknessF', - 'xyLineLabelFuncCode', 'xyLineThicknessF', 'xyLineThicknesses', - 'xyMarkLineMode', 'xyMarkLineModes', 'xyMarker', 'xyMarkerColor', - 'xyMarkerColors', 'xyMarkerSizeF', 'xyMarkerSizes', - 'xyMarkerThicknessF', 'xyMarkerThicknesses', 'xyMarkers', - 'xyMonoDashPattern', 'xyMonoLineColor', 'xyMonoLineLabelFontColor', - 'xyMonoLineThickness', 'xyMonoMarkLineMode', 'xyMonoMarker', - 'xyMonoMarkerColor', 'xyMonoMarkerSize', 'xyMonoMarkerThickness', - 'xyXIrrTensionF', 'xyXIrregularPoints', 'xyXStyle', 'xyYIrrTensionF', - 'xyYIrregularPoints', 'xyYStyle'), prefix=r'\b'), - Name.Builtin), - - # Booleans - (r'\.(True|False)\.', Name.Builtin), - # Comparing Operators - (r'\.(eq|ne|lt|le|gt|ge|not|and|or|xor)\.', Operator.Word), - ], - - 'strings': [ - (r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double), - ], - - 'nums': [ - (r'\d+(?![.e])(_[a-z]\w+)?', Number.Integer), - (r'[+-]?\d*\.\d+(e[-+]?\d+)?(_[a-z]\w+)?', Number.Float), - (r'[+-]?\d+\.\d*(e[-+]?\d+)?(_[a-z]\w+)?', Number.Float), - ], - } +# -*- coding: utf-8 -*- +""" + pygments.lexers.ncl + ~~~~~~~~~~~~~~~~~~~ + + Lexers for NCAR Command Language. + + :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, include, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +__all__ = ['NCLLexer'] + + +class NCLLexer(RegexLexer): + """ + Lexer for NCL code. + + .. 
versionadded:: 2.2 + """ + name = 'NCL' + aliases = ['ncl'] + filenames = ['*.ncl'] + mimetypes = ['text/ncl'] + flags = re.MULTILINE + + tokens = { + 'root': [ + (r';.*\n', Comment), + include('strings'), + include('core'), + (r'[a-zA-Z_]\w*', Name), + include('nums'), + (r'[\s]+', Text), + ], + 'core': [ + # Statements + (words(( + 'begin', 'break', 'continue', 'create', 'defaultapp', 'do', + 'else', 'end', 'external', 'exit', 'True', 'False', 'file', 'function', + 'getvalues', 'graphic', 'group', 'if', 'list', 'load', 'local', + 'new', '_Missing', 'Missing', 'noparent', 'procedure', + 'quit', 'QUIT', 'Quit', 'record', 'return', 'setvalues', 'stop', + 'then', 'while'), prefix=r'\b', suffix=r'\s*\b'), + Keyword), + + # Data Types + (words(( + 'ubyte', 'uint', 'uint64', 'ulong', 'string', 'byte', + 'character', 'double', 'float', 'integer', 'int64', 'logical', + 'long', 'short', 'ushort', 'enumeric', 'numeric', 'snumeric'), + prefix=r'\b', suffix=r'\s*\b'), + Keyword.Type), + + # Operators + (r'[\%^*+\-/<>]', Operator), + + # punctuation: + (r'[\[\]():@$!&|.,\\{}]', Punctuation), + (r'[=:]', Punctuation), + + # Intrinsics + (words(( + 'abs', 'acos', 'addfile', 'addfiles', 'all', 'angmom_atm', 'any', + 'area_conserve_remap', 'area_hi2lores', 'area_poly_sphere', + 'asciiread', 'asciiwrite', 'asin', 'atan', 'atan2', 'attsetvalues', + 'avg', 'betainc', 'bin_avg', 'bin_sum', 'bw_bandpass_filter', + 'cancor', 'cbinread', 'cbinwrite', 'cd_calendar', 'cd_inv_calendar', + 'cdfbin_p', 'cdfbin_pr', 'cdfbin_s', 'cdfbin_xn', 'cdfchi_p', + 'cdfchi_x', 'cdfgam_p', 'cdfgam_x', 'cdfnor_p', 'cdfnor_x', + 'cdft_p', 'cdft_t', 'ceil', 'center_finite_diff', + 'center_finite_diff_n', 'cfftb', 'cfftf', 'cfftf_frq_reorder', + 'charactertodouble', 'charactertofloat', 'charactertointeger', + 'charactertolong', 'charactertoshort', 'charactertostring', + 'chartodouble', 'chartofloat', 'chartoint', 'chartointeger', + 'chartolong', 'chartoshort', 'chartostring', 'chiinv', 'clear', + 'color_index_to_rgba', 'conform', 'conform_dims', 'cos', 'cosh', + 'count_unique_values', 'covcorm', 'covcorm_xy', 'craybinnumrec', + 'craybinrecread', 'create_graphic', 'csa1', 'csa1d', 'csa1s', + 'csa1x', 'csa1xd', 'csa1xs', 'csa2', 'csa2d', 'csa2l', 'csa2ld', + 'csa2ls', 'csa2lx', 'csa2lxd', 'csa2lxs', 'csa2s', 'csa2x', + 'csa2xd', 'csa2xs', 'csa3', 'csa3d', 'csa3l', 'csa3ld', 'csa3ls', + 'csa3lx', 'csa3lxd', 'csa3lxs', 'csa3s', 'csa3x', 'csa3xd', + 'csa3xs', 'csc2s', 'csgetp', 'css2c', 'cssetp', 'cssgrid', 'csstri', + 'csvoro', 'cumsum', 'cz2ccm', 'datatondc', 'day_of_week', + 'day_of_year', 'days_in_month', 'default_fillvalue', 'delete', + 'depth_to_pres', 'destroy', 'determinant', 'dewtemp_trh', + 'dgeevx_lapack', 'dim_acumrun_n', 'dim_avg', 'dim_avg_n', + 'dim_avg_wgt', 'dim_avg_wgt_n', 'dim_cumsum', 'dim_cumsum_n', + 'dim_gamfit_n', 'dim_gbits', 'dim_max', 'dim_max_n', 'dim_median', + 'dim_median_n', 'dim_min', 'dim_min_n', 'dim_num', 'dim_num_n', + 'dim_numrun_n', 'dim_pqsort', 'dim_pqsort_n', 'dim_product', + 'dim_product_n', 'dim_rmsd', 'dim_rmsd_n', 'dim_rmvmean', + 'dim_rmvmean_n', 'dim_rmvmed', 'dim_rmvmed_n', 'dim_spi_n', + 'dim_standardize', 'dim_standardize_n', 'dim_stat4', 'dim_stat4_n', + 'dim_stddev', 'dim_stddev_n', 'dim_sum', 'dim_sum_n', 'dim_sum_wgt', + 'dim_sum_wgt_n', 'dim_variance', 'dim_variance_n', 'dimsizes', + 'doubletobyte', 'doubletochar', 'doubletocharacter', + 'doubletofloat', 'doubletoint', 'doubletointeger', 'doubletolong', + 'doubletoshort', 'dpres_hybrid_ccm', 'dpres_plevel', 'draw', + 
'draw_color_palette', 'dsgetp', 'dsgrid2', 'dsgrid2d', 'dsgrid2s', + 'dsgrid3', 'dsgrid3d', 'dsgrid3s', 'dspnt2', 'dspnt2d', 'dspnt2s', + 'dspnt3', 'dspnt3d', 'dspnt3s', 'dssetp', 'dtrend', 'dtrend_msg', + 'dtrend_msg_n', 'dtrend_n', 'dtrend_quadratic', + 'dtrend_quadratic_msg_n', 'dv2uvf', 'dv2uvg', 'dz_height', + 'echo_off', 'echo_on', 'eof2data', 'eof_varimax', 'eofcor', + 'eofcor_pcmsg', 'eofcor_ts', 'eofcov', 'eofcov_pcmsg', 'eofcov_ts', + 'eofunc', 'eofunc_ts', 'eofunc_varimax', 'equiv_sample_size', 'erf', + 'erfc', 'esacr', 'esacv', 'esccr', 'esccv', 'escorc', 'escorc_n', + 'escovc', 'exit', 'exp', 'exp_tapersh', 'exp_tapersh_wgts', + 'exp_tapershC', 'ezfftb', 'ezfftb_n', 'ezfftf', 'ezfftf_n', + 'f2fosh', 'f2foshv', 'f2fsh', 'f2fshv', 'f2gsh', 'f2gshv', 'fabs', + 'fbindirread', 'fbindirwrite', 'fbinnumrec', 'fbinread', + 'fbinrecread', 'fbinrecwrite', 'fbinwrite', 'fft2db', 'fft2df', + 'fftshift', 'fileattdef', 'filechunkdimdef', 'filedimdef', + 'fileexists', 'filegrpdef', 'filevarattdef', 'filevarchunkdef', + 'filevarcompressleveldef', 'filevardef', 'filevardimsizes', + 'filwgts_lancos', 'filwgts_lanczos', 'filwgts_normal', + 'floattobyte', 'floattochar', 'floattocharacter', 'floattoint', + 'floattointeger', 'floattolong', 'floattoshort', 'floor', + 'fluxEddy', 'fo2fsh', 'fo2fshv', 'fourier_info', 'frame', 'fspan', + 'ftcurv', 'ftcurvd', 'ftcurvi', 'ftcurvp', 'ftcurvpi', 'ftcurvps', + 'ftcurvs', 'ftest', 'ftgetp', 'ftkurv', 'ftkurvd', 'ftkurvp', + 'ftkurvpd', 'ftsetp', 'ftsurf', 'g2fsh', 'g2fshv', 'g2gsh', + 'g2gshv', 'gamma', 'gammainc', 'gaus', 'gaus_lobat', + 'gaus_lobat_wgt', 'gc_aangle', 'gc_clkwise', 'gc_dangle', + 'gc_inout', 'gc_latlon', 'gc_onarc', 'gc_pnt2gc', 'gc_qarea', + 'gc_tarea', 'generate_2d_array', 'get_color_index', + 'get_color_rgba', 'get_cpu_time', 'get_isolines', 'get_ncl_version', + 'get_script_name', 'get_script_prefix_name', 'get_sphere_radius', + 'get_unique_values', 'getbitsone', 'getenv', 'getfiledimsizes', + 'getfilegrpnames', 'getfilepath', 'getfilevaratts', + 'getfilevarchunkdimsizes', 'getfilevardims', 'getfilevardimsizes', + 'getfilevarnames', 'getfilevartypes', 'getvaratts', 'getvardims', + 'gradsf', 'gradsg', 'greg2jul', 'grid2triple', 'hlsrgb', 'hsvrgb', + 'hydro', 'hyi2hyo', 'idsfft', 'igradsf', 'igradsg', 'ilapsf', + 'ilapsg', 'ilapvf', 'ilapvg', 'ind', 'ind_resolve', 'int2p', + 'int2p_n', 'integertobyte', 'integertochar', 'integertocharacter', + 'integertoshort', 'inttobyte', 'inttochar', 'inttoshort', + 'inverse_matrix', 'isatt', 'isbigendian', 'isbyte', 'ischar', + 'iscoord', 'isdefined', 'isdim', 'isdimnamed', 'isdouble', + 'isenumeric', 'isfile', 'isfilepresent', 'isfilevar', + 'isfilevaratt', 'isfilevarcoord', 'isfilevardim', 'isfloat', + 'isfunc', 'isgraphic', 'isint', 'isint64', 'isinteger', + 'isleapyear', 'islogical', 'islong', 'ismissing', 'isnan_ieee', + 'isnumeric', 'ispan', 'isproc', 'isshort', 'issnumeric', 'isstring', + 'isubyte', 'isuint', 'isuint64', 'isulong', 'isunlimited', + 'isunsigned', 'isushort', 'isvar', 'jul2greg', 'kmeans_as136', + 'kolsm2_n', 'kron_product', 'lapsf', 'lapsg', 'lapvf', 'lapvg', + 'latlon2utm', 'lclvl', 'lderuvf', 'lderuvg', 'linint1', 'linint1_n', + 'linint2', 'linint2_points', 'linmsg', 'linmsg_n', 'linrood_latwgt', + 'linrood_wgt', 'list_files', 'list_filevars', 'list_hlus', + 'list_procfuncs', 'list_vars', 'ListAppend', 'ListCount', + 'ListGetType', 'ListIndex', 'ListIndexFromName', 'ListPop', + 'ListPush', 'ListSetType', 'loadscript', 'local_max', 'local_min', + 'log', 'log10', 'longtobyte', 
'longtochar', 'longtocharacter', + 'longtoint', 'longtointeger', 'longtoshort', 'lspoly', 'lspoly_n', + 'mask', 'max', 'maxind', 'min', 'minind', 'mixed_layer_depth', + 'mixhum_ptd', 'mixhum_ptrh', 'mjo_cross_coh2pha', + 'mjo_cross_segment', 'moc_globe_atl', 'monthday', 'natgrid', + 'natgridd', 'natgrids', 'ncargpath', 'ncargversion', 'ndctodata', + 'ndtooned', 'new', 'NewList', 'ngezlogo', 'nggcog', 'nggetp', + 'nglogo', 'ngsetp', 'NhlAddAnnotation', 'NhlAddData', + 'NhlAddOverlay', 'NhlAddPrimitive', 'NhlAppGetDefaultParentId', + 'NhlChangeWorkstation', 'NhlClassName', 'NhlClearWorkstation', + 'NhlDataPolygon', 'NhlDataPolyline', 'NhlDataPolymarker', + 'NhlDataToNDC', 'NhlDestroy', 'NhlDraw', 'NhlFrame', 'NhlFreeColor', + 'NhlGetBB', 'NhlGetClassResources', 'NhlGetErrorObjectId', + 'NhlGetNamedColorIndex', 'NhlGetParentId', + 'NhlGetParentWorkstation', 'NhlGetWorkspaceObjectId', + 'NhlIsAllocatedColor', 'NhlIsApp', 'NhlIsDataComm', 'NhlIsDataItem', + 'NhlIsDataSpec', 'NhlIsTransform', 'NhlIsView', 'NhlIsWorkstation', + 'NhlName', 'NhlNDCPolygon', 'NhlNDCPolyline', 'NhlNDCPolymarker', + 'NhlNDCToData', 'NhlNewColor', 'NhlNewDashPattern', 'NhlNewMarker', + 'NhlPalGetDefined', 'NhlRemoveAnnotation', 'NhlRemoveData', + 'NhlRemoveOverlay', 'NhlRemovePrimitive', 'NhlSetColor', + 'NhlSetDashPattern', 'NhlSetMarker', 'NhlUpdateData', + 'NhlUpdateWorkstation', 'nice_mnmxintvl', 'nngetaspectd', + 'nngetaspects', 'nngetp', 'nngetsloped', 'nngetslopes', 'nngetwts', + 'nngetwtsd', 'nnpnt', 'nnpntd', 'nnpntend', 'nnpntendd', + 'nnpntinit', 'nnpntinitd', 'nnpntinits', 'nnpnts', 'nnsetp', 'num', + 'obj_anal_ic', 'omega_ccm', 'onedtond', 'overlay', 'paleo_outline', + 'pdfxy_bin', 'poisson_grid_fill', 'pop_remap', 'potmp_insitu_ocn', + 'prcwater_dp', 'pres2hybrid', 'pres_hybrid_ccm', 'pres_sigma', + 'print', 'print_table', 'printFileVarSummary', 'printVarSummary', + 'product', 'pslec', 'pslhor', 'pslhyp', 'qsort', 'rand', + 'random_chi', 'random_gamma', 'random_normal', 'random_setallseed', + 'random_uniform', 'rcm2points', 'rcm2rgrid', 'rdsstoi', + 'read_colormap_file', 'reg_multlin', 'regcoef', 'regCoef_n', + 'regline', 'relhum', 'replace_ieeenan', 'reshape', 'reshape_ind', + 'rgba_to_color_index', 'rgbhls', 'rgbhsv', 'rgbyiq', 'rgrid2rcm', + 'rhomb_trunc', 'rip_cape_2d', 'rip_cape_3d', 'round', 'rtest', + 'runave', 'runave_n', 'set_default_fillvalue', 'set_sphere_radius', + 'setfileoption', 'sfvp2uvf', 'sfvp2uvg', 'shaec', 'shagc', + 'shgetnp', 'shgetp', 'shgrid', 'shorttobyte', 'shorttochar', + 'shorttocharacter', 'show_ascii', 'shsec', 'shsetp', 'shsgc', + 'shsgc_R42', 'sigma2hybrid', 'simpeq', 'simpne', 'sin', + 'sindex_yrmo', 'sinh', 'sizeof', 'sleep', 'smth9', 'snindex_yrmo', + 'solve_linsys', 'span_color_indexes', 'span_color_rgba', + 'sparse_matrix_mult', 'spcorr', 'spcorr_n', 'specx_anal', + 'specxy_anal', 'spei', 'sprintf', 'sprinti', 'sqrt', 'sqsort', + 'srand', 'stat2', 'stat4', 'stat_medrng', 'stat_trim', + 'status_exit', 'stdatmus_p2tdz', 'stdatmus_z2tdp', 'stddev', + 'str_capital', 'str_concat', 'str_fields_count', 'str_get_cols', + 'str_get_dq', 'str_get_field', 'str_get_nl', 'str_get_sq', + 'str_get_tab', 'str_index_of_substr', 'str_insert', 'str_is_blank', + 'str_join', 'str_left_strip', 'str_lower', 'str_match', + 'str_match_ic', 'str_match_ic_regex', 'str_match_ind', + 'str_match_ind_ic', 'str_match_ind_ic_regex', 'str_match_ind_regex', + 'str_match_regex', 'str_right_strip', 'str_split', + 'str_split_by_length', 'str_split_csv', 'str_squeeze', 'str_strip', + 'str_sub_str', 
'str_switch', 'str_upper', 'stringtochar', + 'stringtocharacter', 'stringtodouble', 'stringtofloat', + 'stringtoint', 'stringtointeger', 'stringtolong', 'stringtoshort', + 'strlen', 'student_t', 'sum', 'svd_lapack', 'svdcov', 'svdcov_sv', + 'svdstd', 'svdstd_sv', 'system', 'systemfunc', 'tan', 'tanh', + 'taper', 'taper_n', 'tdclrs', 'tdctri', 'tdcudp', 'tdcurv', + 'tddtri', 'tdez2d', 'tdez3d', 'tdgetp', 'tdgrds', 'tdgrid', + 'tdgtrs', 'tdinit', 'tditri', 'tdlbla', 'tdlblp', 'tdlbls', + 'tdline', 'tdlndp', 'tdlnpa', 'tdlpdp', 'tdmtri', 'tdotri', + 'tdpara', 'tdplch', 'tdprpa', 'tdprpi', 'tdprpt', 'tdsetp', + 'tdsort', 'tdstri', 'tdstrs', 'tdttri', 'thornthwaite', 'tobyte', + 'tochar', 'todouble', 'tofloat', 'toint', 'toint64', 'tointeger', + 'tolong', 'toshort', 'tosigned', 'tostring', 'tostring_with_format', + 'totype', 'toubyte', 'touint', 'touint64', 'toulong', 'tounsigned', + 'toushort', 'trend_manken', 'tri_trunc', 'triple2grid', + 'triple2grid2d', 'trop_wmo', 'ttest', 'typeof', 'undef', + 'unique_string', 'update', 'ushorttoint', 'ut_calendar', + 'ut_inv_calendar', 'utm2latlon', 'uv2dv_cfd', 'uv2dvf', 'uv2dvg', + 'uv2sfvpf', 'uv2sfvpg', 'uv2vr_cfd', 'uv2vrdvf', 'uv2vrdvg', + 'uv2vrf', 'uv2vrg', 'v5d_close', 'v5d_create', 'v5d_setLowLev', + 'v5d_setUnits', 'v5d_write', 'v5d_write_var', 'variance', 'vhaec', + 'vhagc', 'vhsec', 'vhsgc', 'vibeta', 'vinth2p', 'vinth2p_ecmwf', + 'vinth2p_ecmwf_nodes', 'vinth2p_nodes', 'vintp2p_ecmwf', 'vr2uvf', + 'vr2uvg', 'vrdv2uvf', 'vrdv2uvg', 'wavelet', 'wavelet_default', + 'weibull', 'wgt_area_smooth', 'wgt_areaave', 'wgt_areaave2', + 'wgt_arearmse', 'wgt_arearmse2', 'wgt_areasum2', 'wgt_runave', + 'wgt_runave_n', 'wgt_vert_avg_beta', 'wgt_volave', 'wgt_volave_ccm', + 'wgt_volrmse', 'wgt_volrmse_ccm', 'where', 'wk_smooth121', 'wmbarb', + 'wmbarbmap', 'wmdrft', 'wmgetp', 'wmlabs', 'wmsetp', 'wmstnm', + 'wmvect', 'wmvectmap', 'wmvlbl', 'wrf_avo', 'wrf_cape_2d', + 'wrf_cape_3d', 'wrf_dbz', 'wrf_eth', 'wrf_helicity', 'wrf_ij_to_ll', + 'wrf_interp_1d', 'wrf_interp_2d_xy', 'wrf_interp_3d_z', + 'wrf_latlon_to_ij', 'wrf_ll_to_ij', 'wrf_omega', 'wrf_pvo', + 'wrf_rh', 'wrf_slp', 'wrf_smooth_2d', 'wrf_td', 'wrf_tk', + 'wrf_updraft_helicity', 'wrf_uvmet', 'wrf_virtual_temp', + 'wrf_wetbulb', 'wrf_wps_close_int', 'wrf_wps_open_int', + 'wrf_wps_rddata_int', 'wrf_wps_rdhead_int', 'wrf_wps_read_int', + 'wrf_wps_write_int', 'write_matrix', 'write_table', 'yiqrgb', + 'z2geouv', 'zonal_mpsi', 'addfiles_GetVar', 'advect_variable', + 'area_conserve_remap_Wrap', 'area_hi2lores_Wrap', + 'array_append_record', 'assignFillValue', 'byte2flt', + 'byte2flt_hdf', 'calcDayAnomTLL', 'calcMonAnomLLLT', + 'calcMonAnomLLT', 'calcMonAnomTLL', 'calcMonAnomTLLL', + 'calculate_monthly_values', 'cd_convert', 'changeCase', + 'changeCaseChar', 'clmDayTLL', 'clmDayTLLL', 'clmMon2clmDay', + 'clmMonLLLT', 'clmMonLLT', 'clmMonTLL', 'clmMonTLLL', 'closest_val', + 'copy_VarAtts', 'copy_VarCoords', 'copy_VarCoords_1', + 'copy_VarCoords_2', 'copy_VarMeta', 'copyatt', 'crossp3', + 'cshstringtolist', 'cssgrid_Wrap', 'dble2flt', 'decimalPlaces', + 'delete_VarAtts', 'dim_avg_n_Wrap', 'dim_avg_wgt_n_Wrap', + 'dim_avg_wgt_Wrap', 'dim_avg_Wrap', 'dim_cumsum_n_Wrap', + 'dim_cumsum_Wrap', 'dim_max_n_Wrap', 'dim_min_n_Wrap', + 'dim_rmsd_n_Wrap', 'dim_rmsd_Wrap', 'dim_rmvmean_n_Wrap', + 'dim_rmvmean_Wrap', 'dim_rmvmed_n_Wrap', 'dim_rmvmed_Wrap', + 'dim_standardize_n_Wrap', 'dim_standardize_Wrap', + 'dim_stddev_n_Wrap', 'dim_stddev_Wrap', 'dim_sum_n_Wrap', + 'dim_sum_wgt_n_Wrap', 'dim_sum_wgt_Wrap', 
'dim_sum_Wrap', + 'dim_variance_n_Wrap', 'dim_variance_Wrap', 'dpres_plevel_Wrap', + 'dtrend_leftdim', 'dv2uvF_Wrap', 'dv2uvG_Wrap', 'eof_north', + 'eofcor_Wrap', 'eofcov_Wrap', 'eofunc_north', 'eofunc_ts_Wrap', + 'eofunc_varimax_reorder', 'eofunc_varimax_Wrap', 'eofunc_Wrap', + 'epsZero', 'f2fosh_Wrap', 'f2foshv_Wrap', 'f2fsh_Wrap', + 'f2fshv_Wrap', 'f2gsh_Wrap', 'f2gshv_Wrap', 'fbindirSwap', + 'fbinseqSwap1', 'fbinseqSwap2', 'flt2dble', 'flt2string', + 'fo2fsh_Wrap', 'fo2fshv_Wrap', 'g2fsh_Wrap', 'g2fshv_Wrap', + 'g2gsh_Wrap', 'g2gshv_Wrap', 'generate_resample_indices', + 'generate_sample_indices', 'generate_unique_indices', + 'genNormalDist', 'get1Dindex', 'get1Dindex_Collapse', + 'get1Dindex_Exclude', 'get_file_suffix', 'GetFillColor', + 'GetFillColorIndex', 'getFillValue', 'getind_latlon2d', + 'getVarDimNames', 'getVarFillValue', 'grib_stime2itime', + 'hyi2hyo_Wrap', 'ilapsF_Wrap', 'ilapsG_Wrap', 'ind_nearest_coord', + 'indStrSubset', 'int2dble', 'int2flt', 'int2p_n_Wrap', 'int2p_Wrap', + 'isMonotonic', 'isStrSubset', 'latGau', 'latGauWgt', 'latGlobeF', + 'latGlobeFo', 'latRegWgt', 'linint1_n_Wrap', 'linint1_Wrap', + 'linint2_points_Wrap', 'linint2_Wrap', 'local_max_1d', + 'local_min_1d', 'lonFlip', 'lonGlobeF', 'lonGlobeFo', 'lonPivot', + 'merge_levels_sfc', 'mod', 'month_to_annual', + 'month_to_annual_weighted', 'month_to_season', 'month_to_season12', + 'month_to_seasonN', 'monthly_total_to_daily_mean', 'nameDim', + 'natgrid_Wrap', 'NewCosWeight', 'niceLatLon2D', 'NormCosWgtGlobe', + 'numAsciiCol', 'numAsciiRow', 'numeric2int', + 'obj_anal_ic_deprecated', 'obj_anal_ic_Wrap', 'omega_ccm_driver', + 'omega_to_w', 'oneDtostring', 'pack_values', 'pattern_cor', 'pdfx', + 'pdfxy', 'pdfxy_conform', 'pot_temp', 'pot_vort_hybrid', + 'pot_vort_isobaric', 'pres2hybrid_Wrap', 'print_clock', + 'printMinMax', 'quadroots', 'rcm2points_Wrap', 'rcm2rgrid_Wrap', + 'readAsciiHead', 'readAsciiTable', 'reg_multlin_stats', + 'region_ind', 'regline_stats', 'relhum_ttd', 'replaceSingleChar', + 'RGBtoCmap', 'rgrid2rcm_Wrap', 'rho_mwjf', 'rm_single_dims', + 'rmAnnCycle1D', 'rmInsufData', 'rmMonAnnCycLLLT', 'rmMonAnnCycLLT', + 'rmMonAnnCycTLL', 'runave_n_Wrap', 'runave_Wrap', 'short2flt', + 'short2flt_hdf', 'shsgc_R42_Wrap', 'sign_f90', 'sign_matlab', + 'smth9_Wrap', 'smthClmDayTLL', 'smthClmDayTLLL', 'SqrtCosWeight', + 'stat_dispersion', 'static_stability', 'stdMonLLLT', 'stdMonLLT', + 'stdMonTLL', 'stdMonTLLL', 'symMinMaxPlt', 'table_attach_columns', + 'table_attach_rows', 'time_to_newtime', 'transpose', + 'triple2grid_Wrap', 'ut_convert', 'uv2dvF_Wrap', 'uv2dvG_Wrap', + 'uv2vrF_Wrap', 'uv2vrG_Wrap', 'vr2uvF_Wrap', 'vr2uvG_Wrap', + 'w_to_omega', 'wallClockElapseTime', 'wave_number_spc', + 'wgt_areaave_Wrap', 'wgt_runave_leftdim', 'wgt_runave_n_Wrap', + 'wgt_runave_Wrap', 'wgt_vertical_n', 'wind_component', + 'wind_direction', 'yyyyddd_to_yyyymmdd', 'yyyymm_time', + 'yyyymm_to_yyyyfrac', 'yyyymmdd_time', 'yyyymmdd_to_yyyyddd', + 'yyyymmdd_to_yyyyfrac', 'yyyymmddhh_time', 'yyyymmddhh_to_yyyyfrac', + 'zonal_mpsi_Wrap', 'zonalAve', 'calendar_decode2', 'cd_string', + 'kf_filter', 'run_cor', 'time_axis_labels', 'ut_string', + 'wrf_contour', 'wrf_map', 'wrf_map_overlay', 'wrf_map_overlays', + 'wrf_map_resources', 'wrf_map_zoom', 'wrf_overlay', 'wrf_overlays', + 'wrf_user_getvar', 'wrf_user_ij_to_ll', 'wrf_user_intrp2d', + 'wrf_user_intrp3d', 'wrf_user_latlon_to_ij', 'wrf_user_list_times', + 'wrf_user_ll_to_ij', 'wrf_user_unstagger', 'wrf_user_vert_interp', + 'wrf_vector', 'gsn_add_annotation', 'gsn_add_polygon', + 
'gsn_add_polyline', 'gsn_add_polymarker', + 'gsn_add_shapefile_polygons', 'gsn_add_shapefile_polylines', + 'gsn_add_shapefile_polymarkers', 'gsn_add_text', 'gsn_attach_plots', + 'gsn_blank_plot', 'gsn_contour', 'gsn_contour_map', + 'gsn_contour_shade', 'gsn_coordinates', 'gsn_create_labelbar', + 'gsn_create_legend', 'gsn_create_text', + 'gsn_csm_attach_zonal_means', 'gsn_csm_blank_plot', + 'gsn_csm_contour', 'gsn_csm_contour_map', 'gsn_csm_contour_map_ce', + 'gsn_csm_contour_map_overlay', 'gsn_csm_contour_map_polar', + 'gsn_csm_hov', 'gsn_csm_lat_time', 'gsn_csm_map', 'gsn_csm_map_ce', + 'gsn_csm_map_polar', 'gsn_csm_pres_hgt', + 'gsn_csm_pres_hgt_streamline', 'gsn_csm_pres_hgt_vector', + 'gsn_csm_streamline', 'gsn_csm_streamline_contour_map', + 'gsn_csm_streamline_contour_map_ce', + 'gsn_csm_streamline_contour_map_polar', 'gsn_csm_streamline_map', + 'gsn_csm_streamline_map_ce', 'gsn_csm_streamline_map_polar', + 'gsn_csm_streamline_scalar', 'gsn_csm_streamline_scalar_map', + 'gsn_csm_streamline_scalar_map_ce', + 'gsn_csm_streamline_scalar_map_polar', 'gsn_csm_time_lat', + 'gsn_csm_vector', 'gsn_csm_vector_map', 'gsn_csm_vector_map_ce', + 'gsn_csm_vector_map_polar', 'gsn_csm_vector_scalar', + 'gsn_csm_vector_scalar_map', 'gsn_csm_vector_scalar_map_ce', + 'gsn_csm_vector_scalar_map_polar', 'gsn_csm_x2y', 'gsn_csm_x2y2', + 'gsn_csm_xy', 'gsn_csm_xy2', 'gsn_csm_xy3', 'gsn_csm_y', + 'gsn_define_colormap', 'gsn_draw_colormap', 'gsn_draw_named_colors', + 'gsn_histogram', 'gsn_labelbar_ndc', 'gsn_legend_ndc', 'gsn_map', + 'gsn_merge_colormaps', 'gsn_open_wks', 'gsn_panel', 'gsn_polygon', + 'gsn_polygon_ndc', 'gsn_polyline', 'gsn_polyline_ndc', + 'gsn_polymarker', 'gsn_polymarker_ndc', 'gsn_retrieve_colormap', + 'gsn_reverse_colormap', 'gsn_streamline', 'gsn_streamline_map', + 'gsn_streamline_scalar', 'gsn_streamline_scalar_map', 'gsn_table', + 'gsn_text', 'gsn_text_ndc', 'gsn_vector', 'gsn_vector_map', + 'gsn_vector_scalar', 'gsn_vector_scalar_map', 'gsn_xy', 'gsn_y', + 'hsv2rgb', 'maximize_output', 'namedcolor2rgb', 'namedcolor2rgba', + 'reset_device_coordinates', 'span_named_colors'), prefix=r'\b'), + Name.Builtin), + + # Resources + (words(( + 'amDataXF', 'amDataYF', 'amJust', 'amOn', 'amOrthogonalPosF', + 'amParallelPosF', 'amResizeNotify', 'amSide', 'amTrackData', + 'amViewId', 'amZone', 'appDefaultParent', 'appFileSuffix', + 'appResources', 'appSysDir', 'appUsrDir', 'caCopyArrays', + 'caXArray', 'caXCast', 'caXMaxV', 'caXMinV', 'caXMissingV', + 'caYArray', 'caYCast', 'caYMaxV', 'caYMinV', 'caYMissingV', + 'cnCellFillEdgeColor', 'cnCellFillMissingValEdgeColor', + 'cnConpackParams', 'cnConstFEnableFill', 'cnConstFLabelAngleF', + 'cnConstFLabelBackgroundColor', 'cnConstFLabelConstantSpacingF', + 'cnConstFLabelFont', 'cnConstFLabelFontAspectF', + 'cnConstFLabelFontColor', 'cnConstFLabelFontHeightF', + 'cnConstFLabelFontQuality', 'cnConstFLabelFontThicknessF', + 'cnConstFLabelFormat', 'cnConstFLabelFuncCode', 'cnConstFLabelJust', + 'cnConstFLabelOn', 'cnConstFLabelOrthogonalPosF', + 'cnConstFLabelParallelPosF', 'cnConstFLabelPerimColor', + 'cnConstFLabelPerimOn', 'cnConstFLabelPerimSpaceF', + 'cnConstFLabelPerimThicknessF', 'cnConstFLabelSide', + 'cnConstFLabelString', 'cnConstFLabelTextDirection', + 'cnConstFLabelZone', 'cnConstFUseInfoLabelRes', + 'cnExplicitLabelBarLabelsOn', 'cnExplicitLegendLabelsOn', + 'cnExplicitLineLabelsOn', 'cnFillBackgroundColor', 'cnFillColor', + 'cnFillColors', 'cnFillDotSizeF', 'cnFillDrawOrder', 'cnFillMode', + 'cnFillOn', 'cnFillOpacityF', 'cnFillPalette', 
'cnFillPattern', + 'cnFillPatterns', 'cnFillScaleF', 'cnFillScales', 'cnFixFillBleed', + 'cnGridBoundFillColor', 'cnGridBoundFillPattern', + 'cnGridBoundFillScaleF', 'cnGridBoundPerimColor', + 'cnGridBoundPerimDashPattern', 'cnGridBoundPerimOn', + 'cnGridBoundPerimThicknessF', 'cnHighLabelAngleF', + 'cnHighLabelBackgroundColor', 'cnHighLabelConstantSpacingF', + 'cnHighLabelCount', 'cnHighLabelFont', 'cnHighLabelFontAspectF', + 'cnHighLabelFontColor', 'cnHighLabelFontHeightF', + 'cnHighLabelFontQuality', 'cnHighLabelFontThicknessF', + 'cnHighLabelFormat', 'cnHighLabelFuncCode', 'cnHighLabelPerimColor', + 'cnHighLabelPerimOn', 'cnHighLabelPerimSpaceF', + 'cnHighLabelPerimThicknessF', 'cnHighLabelString', 'cnHighLabelsOn', + 'cnHighLowLabelOverlapMode', 'cnHighUseLineLabelRes', + 'cnInfoLabelAngleF', 'cnInfoLabelBackgroundColor', + 'cnInfoLabelConstantSpacingF', 'cnInfoLabelFont', + 'cnInfoLabelFontAspectF', 'cnInfoLabelFontColor', + 'cnInfoLabelFontHeightF', 'cnInfoLabelFontQuality', + 'cnInfoLabelFontThicknessF', 'cnInfoLabelFormat', + 'cnInfoLabelFuncCode', 'cnInfoLabelJust', 'cnInfoLabelOn', + 'cnInfoLabelOrthogonalPosF', 'cnInfoLabelParallelPosF', + 'cnInfoLabelPerimColor', 'cnInfoLabelPerimOn', + 'cnInfoLabelPerimSpaceF', 'cnInfoLabelPerimThicknessF', + 'cnInfoLabelSide', 'cnInfoLabelString', 'cnInfoLabelTextDirection', + 'cnInfoLabelZone', 'cnLabelBarEndLabelsOn', 'cnLabelBarEndStyle', + 'cnLabelDrawOrder', 'cnLabelMasking', 'cnLabelScaleFactorF', + 'cnLabelScaleValueF', 'cnLabelScalingMode', 'cnLegendLevelFlags', + 'cnLevelCount', 'cnLevelFlag', 'cnLevelFlags', 'cnLevelSelectionMode', + 'cnLevelSpacingF', 'cnLevels', 'cnLineColor', 'cnLineColors', + 'cnLineDashPattern', 'cnLineDashPatterns', 'cnLineDashSegLenF', + 'cnLineDrawOrder', 'cnLineLabelAngleF', 'cnLineLabelBackgroundColor', + 'cnLineLabelConstantSpacingF', 'cnLineLabelCount', + 'cnLineLabelDensityF', 'cnLineLabelFont', 'cnLineLabelFontAspectF', + 'cnLineLabelFontColor', 'cnLineLabelFontColors', + 'cnLineLabelFontHeightF', 'cnLineLabelFontQuality', + 'cnLineLabelFontThicknessF', 'cnLineLabelFormat', + 'cnLineLabelFuncCode', 'cnLineLabelInterval', 'cnLineLabelPerimColor', + 'cnLineLabelPerimOn', 'cnLineLabelPerimSpaceF', + 'cnLineLabelPerimThicknessF', 'cnLineLabelPlacementMode', + 'cnLineLabelStrings', 'cnLineLabelsOn', 'cnLinePalette', + 'cnLineThicknessF', 'cnLineThicknesses', 'cnLinesOn', + 'cnLowLabelAngleF', 'cnLowLabelBackgroundColor', + 'cnLowLabelConstantSpacingF', 'cnLowLabelCount', 'cnLowLabelFont', + 'cnLowLabelFontAspectF', 'cnLowLabelFontColor', + 'cnLowLabelFontHeightF', 'cnLowLabelFontQuality', + 'cnLowLabelFontThicknessF', 'cnLowLabelFormat', 'cnLowLabelFuncCode', + 'cnLowLabelPerimColor', 'cnLowLabelPerimOn', 'cnLowLabelPerimSpaceF', + 'cnLowLabelPerimThicknessF', 'cnLowLabelString', 'cnLowLabelsOn', + 'cnLowUseHighLabelRes', 'cnMaxDataValueFormat', 'cnMaxLevelCount', + 'cnMaxLevelValF', 'cnMaxPointDistanceF', 'cnMinLevelValF', + 'cnMissingValFillColor', 'cnMissingValFillPattern', + 'cnMissingValFillScaleF', 'cnMissingValPerimColor', + 'cnMissingValPerimDashPattern', 'cnMissingValPerimGridBoundOn', + 'cnMissingValPerimOn', 'cnMissingValPerimThicknessF', + 'cnMonoFillColor', 'cnMonoFillPattern', 'cnMonoFillScale', + 'cnMonoLevelFlag', 'cnMonoLineColor', 'cnMonoLineDashPattern', + 'cnMonoLineLabelFontColor', 'cnMonoLineThickness', 'cnNoDataLabelOn', + 'cnNoDataLabelString', 'cnOutOfRangeFillColor', + 'cnOutOfRangeFillPattern', 'cnOutOfRangeFillScaleF', + 'cnOutOfRangePerimColor', 'cnOutOfRangePerimDashPattern', 
+ 'cnOutOfRangePerimOn', 'cnOutOfRangePerimThicknessF', + 'cnRasterCellSizeF', 'cnRasterMinCellSizeF', 'cnRasterModeOn', + 'cnRasterSampleFactorF', 'cnRasterSmoothingOn', 'cnScalarFieldData', + 'cnSmoothingDistanceF', 'cnSmoothingOn', 'cnSmoothingTensionF', + 'cnSpanFillPalette', 'cnSpanLinePalette', 'ctCopyTables', + 'ctXElementSize', 'ctXMaxV', 'ctXMinV', 'ctXMissingV', 'ctXTable', + 'ctXTableLengths', 'ctXTableType', 'ctYElementSize', 'ctYMaxV', + 'ctYMinV', 'ctYMissingV', 'ctYTable', 'ctYTableLengths', + 'ctYTableType', 'dcDelayCompute', 'errBuffer', + 'errFileName', 'errFilePtr', 'errLevel', 'errPrint', 'errUnitNumber', + 'gsClipOn', 'gsColors', 'gsEdgeColor', 'gsEdgeDashPattern', + 'gsEdgeDashSegLenF', 'gsEdgeThicknessF', 'gsEdgesOn', + 'gsFillBackgroundColor', 'gsFillColor', 'gsFillDotSizeF', + 'gsFillIndex', 'gsFillLineThicknessF', 'gsFillOpacityF', + 'gsFillScaleF', 'gsFont', 'gsFontAspectF', 'gsFontColor', + 'gsFontHeightF', 'gsFontOpacityF', 'gsFontQuality', + 'gsFontThicknessF', 'gsLineColor', 'gsLineDashPattern', + 'gsLineDashSegLenF', 'gsLineLabelConstantSpacingF', 'gsLineLabelFont', + 'gsLineLabelFontAspectF', 'gsLineLabelFontColor', + 'gsLineLabelFontHeightF', 'gsLineLabelFontQuality', + 'gsLineLabelFontThicknessF', 'gsLineLabelFuncCode', + 'gsLineLabelString', 'gsLineOpacityF', 'gsLineThicknessF', + 'gsMarkerColor', 'gsMarkerIndex', 'gsMarkerOpacityF', 'gsMarkerSizeF', + 'gsMarkerThicknessF', 'gsSegments', 'gsTextAngleF', + 'gsTextConstantSpacingF', 'gsTextDirection', 'gsTextFuncCode', + 'gsTextJustification', 'gsnAboveYRefLineBarColors', + 'gsnAboveYRefLineBarFillScales', 'gsnAboveYRefLineBarPatterns', + 'gsnAboveYRefLineColor', 'gsnAddCyclic', 'gsnAttachBorderOn', + 'gsnAttachPlotsXAxis', 'gsnBelowYRefLineBarColors', + 'gsnBelowYRefLineBarFillScales', 'gsnBelowYRefLineBarPatterns', + 'gsnBelowYRefLineColor', 'gsnBoxMargin', 'gsnCenterString', + 'gsnCenterStringFontColor', 'gsnCenterStringFontHeightF', + 'gsnCenterStringFuncCode', 'gsnCenterStringOrthogonalPosF', + 'gsnCenterStringParallelPosF', 'gsnContourLineThicknessesScale', + 'gsnContourNegLineDashPattern', 'gsnContourPosLineDashPattern', + 'gsnContourZeroLineThicknessF', 'gsnDebugWriteFileName', 'gsnDraw', + 'gsnFrame', 'gsnHistogramBarWidthPercent', 'gsnHistogramBinIntervals', + 'gsnHistogramBinMissing', 'gsnHistogramBinWidth', + 'gsnHistogramClassIntervals', 'gsnHistogramCompare', + 'gsnHistogramComputePercentages', + 'gsnHistogramComputePercentagesNoMissing', + 'gsnHistogramDiscreteBinValues', 'gsnHistogramDiscreteClassValues', + 'gsnHistogramHorizontal', 'gsnHistogramMinMaxBinsOn', + 'gsnHistogramNumberOfBins', 'gsnHistogramPercentSign', + 'gsnHistogramSelectNiceIntervals', 'gsnLeftString', + 'gsnLeftStringFontColor', 'gsnLeftStringFontHeightF', + 'gsnLeftStringFuncCode', 'gsnLeftStringOrthogonalPosF', + 'gsnLeftStringParallelPosF', 'gsnMajorLatSpacing', + 'gsnMajorLonSpacing', 'gsnMaskLambertConformal', + 'gsnMaskLambertConformalOutlineOn', 'gsnMaximize', + 'gsnMinorLatSpacing', 'gsnMinorLonSpacing', 'gsnPanelBottom', + 'gsnPanelCenter', 'gsnPanelDebug', 'gsnPanelFigureStrings', + 'gsnPanelFigureStringsBackgroundFillColor', + 'gsnPanelFigureStringsFontHeightF', 'gsnPanelFigureStringsJust', + 'gsnPanelFigureStringsPerimOn', 'gsnPanelLabelBar', 'gsnPanelLeft', + 'gsnPanelMainFont', 'gsnPanelMainFontColor', + 'gsnPanelMainFontHeightF', 'gsnPanelMainString', 'gsnPanelRight', + 'gsnPanelRowSpec', 'gsnPanelScalePlotIndex', 'gsnPanelTop', + 'gsnPanelXF', 'gsnPanelXWhiteSpacePercent', 'gsnPanelYF', + 
'gsnPanelYWhiteSpacePercent', 'gsnPaperHeight', 'gsnPaperMargin', + 'gsnPaperOrientation', 'gsnPaperWidth', 'gsnPolar', + 'gsnPolarLabelDistance', 'gsnPolarLabelFont', + 'gsnPolarLabelFontHeightF', 'gsnPolarLabelSpacing', 'gsnPolarTime', + 'gsnPolarUT', 'gsnRightString', 'gsnRightStringFontColor', + 'gsnRightStringFontHeightF', 'gsnRightStringFuncCode', + 'gsnRightStringOrthogonalPosF', 'gsnRightStringParallelPosF', + 'gsnScalarContour', 'gsnScale', 'gsnShape', 'gsnSpreadColorEnd', + 'gsnSpreadColorStart', 'gsnSpreadColors', 'gsnStringFont', + 'gsnStringFontColor', 'gsnStringFontHeightF', 'gsnStringFuncCode', + 'gsnTickMarksOn', 'gsnXAxisIrregular2Linear', 'gsnXAxisIrregular2Log', + 'gsnXRefLine', 'gsnXRefLineColor', 'gsnXRefLineDashPattern', + 'gsnXRefLineThicknessF', 'gsnXYAboveFillColors', 'gsnXYBarChart', + 'gsnXYBarChartBarWidth', 'gsnXYBarChartColors', + 'gsnXYBarChartColors2', 'gsnXYBarChartFillDotSizeF', + 'gsnXYBarChartFillLineThicknessF', 'gsnXYBarChartFillOpacityF', + 'gsnXYBarChartFillScaleF', 'gsnXYBarChartOutlineOnly', + 'gsnXYBarChartOutlineThicknessF', 'gsnXYBarChartPatterns', + 'gsnXYBarChartPatterns2', 'gsnXYBelowFillColors', 'gsnXYFillColors', + 'gsnXYFillOpacities', 'gsnXYLeftFillColors', 'gsnXYRightFillColors', + 'gsnYAxisIrregular2Linear', 'gsnYAxisIrregular2Log', 'gsnYRefLine', + 'gsnYRefLineColor', 'gsnYRefLineColors', 'gsnYRefLineDashPattern', + 'gsnYRefLineDashPatterns', 'gsnYRefLineThicknessF', + 'gsnYRefLineThicknesses', 'gsnZonalMean', 'gsnZonalMeanXMaxF', + 'gsnZonalMeanXMinF', 'gsnZonalMeanYRefLine', 'lbAutoManage', + 'lbBottomMarginF', 'lbBoxCount', 'lbBoxEndCapStyle', 'lbBoxFractions', + 'lbBoxLineColor', 'lbBoxLineDashPattern', 'lbBoxLineDashSegLenF', + 'lbBoxLineThicknessF', 'lbBoxLinesOn', 'lbBoxMajorExtentF', + 'lbBoxMinorExtentF', 'lbBoxSeparatorLinesOn', 'lbBoxSizing', + 'lbFillBackground', 'lbFillColor', 'lbFillColors', 'lbFillDotSizeF', + 'lbFillLineThicknessF', 'lbFillPattern', 'lbFillPatterns', + 'lbFillScaleF', 'lbFillScales', 'lbJustification', 'lbLabelAlignment', + 'lbLabelAngleF', 'lbLabelAutoStride', 'lbLabelBarOn', + 'lbLabelConstantSpacingF', 'lbLabelDirection', 'lbLabelFont', + 'lbLabelFontAspectF', 'lbLabelFontColor', 'lbLabelFontHeightF', + 'lbLabelFontQuality', 'lbLabelFontThicknessF', 'lbLabelFuncCode', + 'lbLabelJust', 'lbLabelOffsetF', 'lbLabelPosition', 'lbLabelStride', + 'lbLabelStrings', 'lbLabelsOn', 'lbLeftMarginF', 'lbMaxLabelLenF', + 'lbMinLabelSpacingF', 'lbMonoFillColor', 'lbMonoFillPattern', + 'lbMonoFillScale', 'lbOrientation', 'lbPerimColor', + 'lbPerimDashPattern', 'lbPerimDashSegLenF', 'lbPerimFill', + 'lbPerimFillColor', 'lbPerimOn', 'lbPerimThicknessF', + 'lbRasterFillOn', 'lbRightMarginF', 'lbTitleAngleF', + 'lbTitleConstantSpacingF', 'lbTitleDirection', 'lbTitleExtentF', + 'lbTitleFont', 'lbTitleFontAspectF', 'lbTitleFontColor', + 'lbTitleFontHeightF', 'lbTitleFontQuality', 'lbTitleFontThicknessF', + 'lbTitleFuncCode', 'lbTitleJust', 'lbTitleOffsetF', 'lbTitleOn', + 'lbTitlePosition', 'lbTitleString', 'lbTopMarginF', 'lgAutoManage', + 'lgBottomMarginF', 'lgBoxBackground', 'lgBoxLineColor', + 'lgBoxLineDashPattern', 'lgBoxLineDashSegLenF', 'lgBoxLineThicknessF', + 'lgBoxLinesOn', 'lgBoxMajorExtentF', 'lgBoxMinorExtentF', + 'lgDashIndex', 'lgDashIndexes', 'lgItemCount', 'lgItemOrder', + 'lgItemPlacement', 'lgItemPositions', 'lgItemType', 'lgItemTypes', + 'lgJustification', 'lgLabelAlignment', 'lgLabelAngleF', + 'lgLabelAutoStride', 'lgLabelConstantSpacingF', 'lgLabelDirection', + 'lgLabelFont', 'lgLabelFontAspectF', 
'lgLabelFontColor', + 'lgLabelFontHeightF', 'lgLabelFontQuality', 'lgLabelFontThicknessF', + 'lgLabelFuncCode', 'lgLabelJust', 'lgLabelOffsetF', 'lgLabelPosition', + 'lgLabelStride', 'lgLabelStrings', 'lgLabelsOn', 'lgLeftMarginF', + 'lgLegendOn', 'lgLineColor', 'lgLineColors', 'lgLineDashSegLenF', + 'lgLineDashSegLens', 'lgLineLabelConstantSpacingF', 'lgLineLabelFont', + 'lgLineLabelFontAspectF', 'lgLineLabelFontColor', + 'lgLineLabelFontColors', 'lgLineLabelFontHeightF', + 'lgLineLabelFontHeights', 'lgLineLabelFontQuality', + 'lgLineLabelFontThicknessF', 'lgLineLabelFuncCode', + 'lgLineLabelStrings', 'lgLineLabelsOn', 'lgLineThicknessF', + 'lgLineThicknesses', 'lgMarkerColor', 'lgMarkerColors', + 'lgMarkerIndex', 'lgMarkerIndexes', 'lgMarkerSizeF', 'lgMarkerSizes', + 'lgMarkerThicknessF', 'lgMarkerThicknesses', 'lgMonoDashIndex', + 'lgMonoItemType', 'lgMonoLineColor', 'lgMonoLineDashSegLen', + 'lgMonoLineLabelFontColor', 'lgMonoLineLabelFontHeight', + 'lgMonoLineThickness', 'lgMonoMarkerColor', 'lgMonoMarkerIndex', + 'lgMonoMarkerSize', 'lgMonoMarkerThickness', 'lgOrientation', + 'lgPerimColor', 'lgPerimDashPattern', 'lgPerimDashSegLenF', + 'lgPerimFill', 'lgPerimFillColor', 'lgPerimOn', 'lgPerimThicknessF', + 'lgRightMarginF', 'lgTitleAngleF', 'lgTitleConstantSpacingF', + 'lgTitleDirection', 'lgTitleExtentF', 'lgTitleFont', + 'lgTitleFontAspectF', 'lgTitleFontColor', 'lgTitleFontHeightF', + 'lgTitleFontQuality', 'lgTitleFontThicknessF', 'lgTitleFuncCode', + 'lgTitleJust', 'lgTitleOffsetF', 'lgTitleOn', 'lgTitlePosition', + 'lgTitleString', 'lgTopMarginF', 'mpAreaGroupCount', + 'mpAreaMaskingOn', 'mpAreaNames', 'mpAreaTypes', 'mpBottomAngleF', + 'mpBottomMapPosF', 'mpBottomNDCF', 'mpBottomNPCF', + 'mpBottomPointLatF', 'mpBottomPointLonF', 'mpBottomWindowF', + 'mpCenterLatF', 'mpCenterLonF', 'mpCenterRotF', 'mpCountyLineColor', + 'mpCountyLineDashPattern', 'mpCountyLineDashSegLenF', + 'mpCountyLineThicknessF', 'mpDataBaseVersion', 'mpDataResolution', + 'mpDataSetName', 'mpDefaultFillColor', 'mpDefaultFillPattern', + 'mpDefaultFillScaleF', 'mpDynamicAreaGroups', 'mpEllipticalBoundary', + 'mpFillAreaSpecifiers', 'mpFillBoundarySets', 'mpFillColor', + 'mpFillColors', 'mpFillColors-default', 'mpFillDotSizeF', + 'mpFillDrawOrder', 'mpFillOn', 'mpFillPatternBackground', + 'mpFillPattern', 'mpFillPatterns', 'mpFillPatterns-default', + 'mpFillScaleF', 'mpFillScales', 'mpFillScales-default', + 'mpFixedAreaGroups', 'mpGeophysicalLineColor', + 'mpGeophysicalLineDashPattern', 'mpGeophysicalLineDashSegLenF', + 'mpGeophysicalLineThicknessF', 'mpGreatCircleLinesOn', + 'mpGridAndLimbDrawOrder', 'mpGridAndLimbOn', 'mpGridLatSpacingF', + 'mpGridLineColor', 'mpGridLineDashPattern', 'mpGridLineDashSegLenF', + 'mpGridLineThicknessF', 'mpGridLonSpacingF', 'mpGridMaskMode', + 'mpGridMaxLatF', 'mpGridPolarLonSpacingF', 'mpGridSpacingF', + 'mpInlandWaterFillColor', 'mpInlandWaterFillPattern', + 'mpInlandWaterFillScaleF', 'mpLabelDrawOrder', 'mpLabelFontColor', + 'mpLabelFontHeightF', 'mpLabelsOn', 'mpLambertMeridianF', + 'mpLambertParallel1F', 'mpLambertParallel2F', 'mpLandFillColor', + 'mpLandFillPattern', 'mpLandFillScaleF', 'mpLeftAngleF', + 'mpLeftCornerLatF', 'mpLeftCornerLonF', 'mpLeftMapPosF', + 'mpLeftNDCF', 'mpLeftNPCF', 'mpLeftPointLatF', + 'mpLeftPointLonF', 'mpLeftWindowF', 'mpLimbLineColor', + 'mpLimbLineDashPattern', 'mpLimbLineDashSegLenF', + 'mpLimbLineThicknessF', 'mpLimitMode', 'mpMaskAreaSpecifiers', + 'mpMaskOutlineSpecifiers', 'mpMaxLatF', 'mpMaxLonF', + 'mpMinLatF', 'mpMinLonF', 
'mpMonoFillColor', 'mpMonoFillPattern', + 'mpMonoFillScale', 'mpNationalLineColor', 'mpNationalLineDashPattern', + 'mpNationalLineThicknessF', 'mpOceanFillColor', 'mpOceanFillPattern', + 'mpOceanFillScaleF', 'mpOutlineBoundarySets', 'mpOutlineDrawOrder', + 'mpOutlineMaskingOn', 'mpOutlineOn', 'mpOutlineSpecifiers', + 'mpPerimDrawOrder', 'mpPerimLineColor', 'mpPerimLineDashPattern', + 'mpPerimLineDashSegLenF', 'mpPerimLineThicknessF', 'mpPerimOn', + 'mpPolyMode', 'mpProjection', 'mpProvincialLineColor', + 'mpProvincialLineDashPattern', 'mpProvincialLineDashSegLenF', + 'mpProvincialLineThicknessF', 'mpRelativeCenterLat', + 'mpRelativeCenterLon', 'mpRightAngleF', 'mpRightCornerLatF', + 'mpRightCornerLonF', 'mpRightMapPosF', 'mpRightNDCF', + 'mpRightNPCF', 'mpRightPointLatF', 'mpRightPointLonF', + 'mpRightWindowF', 'mpSatelliteAngle1F', 'mpSatelliteAngle2F', + 'mpSatelliteDistF', 'mpShapeMode', 'mpSpecifiedFillColors', + 'mpSpecifiedFillDirectIndexing', 'mpSpecifiedFillPatterns', + 'mpSpecifiedFillPriority', 'mpSpecifiedFillScales', + 'mpTopAngleF', 'mpTopMapPosF', 'mpTopNDCF', 'mpTopNPCF', + 'mpTopPointLatF', 'mpTopPointLonF', 'mpTopWindowF', + 'mpUSStateLineColor', 'mpUSStateLineDashPattern', + 'mpUSStateLineDashSegLenF', 'mpUSStateLineThicknessF', + 'pmAnnoManagers', 'pmAnnoViews', 'pmLabelBarDisplayMode', + 'pmLabelBarHeightF', 'pmLabelBarKeepAspect', 'pmLabelBarOrthogonalPosF', + 'pmLabelBarParallelPosF', 'pmLabelBarSide', 'pmLabelBarWidthF', + 'pmLabelBarZone', 'pmLegendDisplayMode', 'pmLegendHeightF', + 'pmLegendKeepAspect', 'pmLegendOrthogonalPosF', + 'pmLegendParallelPosF', 'pmLegendSide', 'pmLegendWidthF', + 'pmLegendZone', 'pmOverlaySequenceIds', 'pmTickMarkDisplayMode', + 'pmTickMarkZone', 'pmTitleDisplayMode', 'pmTitleZone', + 'prGraphicStyle', 'prPolyType', 'prXArray', 'prYArray', + 'sfCopyData', 'sfDataArray', 'sfDataMaxV', 'sfDataMinV', + 'sfElementNodes', 'sfExchangeDimensions', 'sfFirstNodeIndex', + 'sfMissingValueV', 'sfXArray', 'sfXCActualEndF', 'sfXCActualStartF', + 'sfXCEndIndex', 'sfXCEndSubsetV', 'sfXCEndV', 'sfXCStartIndex', + 'sfXCStartSubsetV', 'sfXCStartV', 'sfXCStride', 'sfXCellBounds', + 'sfYArray', 'sfYCActualEndF', 'sfYCActualStartF', 'sfYCEndIndex', + 'sfYCEndSubsetV', 'sfYCEndV', 'sfYCStartIndex', 'sfYCStartSubsetV', + 'sfYCStartV', 'sfYCStride', 'sfYCellBounds', 'stArrowLengthF', + 'stArrowStride', 'stCrossoverCheckCount', + 'stExplicitLabelBarLabelsOn', 'stLabelBarEndLabelsOn', + 'stLabelFormat', 'stLengthCheckCount', 'stLevelColors', + 'stLevelCount', 'stLevelPalette', 'stLevelSelectionMode', + 'stLevelSpacingF', 'stLevels', 'stLineColor', 'stLineOpacityF', + 'stLineStartStride', 'stLineThicknessF', 'stMapDirection', + 'stMaxLevelCount', 'stMaxLevelValF', 'stMinArrowSpacingF', + 'stMinDistanceF', 'stMinLevelValF', 'stMinLineSpacingF', + 'stMinStepFactorF', 'stMonoLineColor', 'stNoDataLabelOn', + 'stNoDataLabelString', 'stScalarFieldData', 'stScalarMissingValColor', + 'stSpanLevelPalette', 'stStepSizeF', 'stStreamlineDrawOrder', + 'stUseScalarArray', 'stVectorFieldData', 'stZeroFLabelAngleF', + 'stZeroFLabelBackgroundColor', 'stZeroFLabelConstantSpacingF', + 'stZeroFLabelFont', 'stZeroFLabelFontAspectF', + 'stZeroFLabelFontColor', 'stZeroFLabelFontHeightF', + 'stZeroFLabelFontQuality', 'stZeroFLabelFontThicknessF', + 'stZeroFLabelFuncCode', 'stZeroFLabelJust', 'stZeroFLabelOn', + 'stZeroFLabelOrthogonalPosF', 'stZeroFLabelParallelPosF', + 'stZeroFLabelPerimColor', 'stZeroFLabelPerimOn', + 'stZeroFLabelPerimSpaceF', 'stZeroFLabelPerimThicknessF', + 
'stZeroFLabelSide', 'stZeroFLabelString', 'stZeroFLabelTextDirection', + 'stZeroFLabelZone', 'tfDoNDCOverlay', 'tfPlotManagerOn', + 'tfPolyDrawList', 'tfPolyDrawOrder', 'tiDeltaF', 'tiMainAngleF', + 'tiMainConstantSpacingF', 'tiMainDirection', 'tiMainFont', + 'tiMainFontAspectF', 'tiMainFontColor', 'tiMainFontHeightF', + 'tiMainFontQuality', 'tiMainFontThicknessF', 'tiMainFuncCode', + 'tiMainJust', 'tiMainOffsetXF', 'tiMainOffsetYF', 'tiMainOn', + 'tiMainPosition', 'tiMainSide', 'tiMainString', 'tiUseMainAttributes', + 'tiXAxisAngleF', 'tiXAxisConstantSpacingF', 'tiXAxisDirection', + 'tiXAxisFont', 'tiXAxisFontAspectF', 'tiXAxisFontColor', + 'tiXAxisFontHeightF', 'tiXAxisFontQuality', 'tiXAxisFontThicknessF', + 'tiXAxisFuncCode', 'tiXAxisJust', 'tiXAxisOffsetXF', + 'tiXAxisOffsetYF', 'tiXAxisOn', 'tiXAxisPosition', 'tiXAxisSide', + 'tiXAxisString', 'tiYAxisAngleF', 'tiYAxisConstantSpacingF', + 'tiYAxisDirection', 'tiYAxisFont', 'tiYAxisFontAspectF', + 'tiYAxisFontColor', 'tiYAxisFontHeightF', 'tiYAxisFontQuality', + 'tiYAxisFontThicknessF', 'tiYAxisFuncCode', 'tiYAxisJust', + 'tiYAxisOffsetXF', 'tiYAxisOffsetYF', 'tiYAxisOn', 'tiYAxisPosition', + 'tiYAxisSide', 'tiYAxisString', 'tmBorderLineColor', + 'tmBorderThicknessF', 'tmEqualizeXYSizes', 'tmLabelAutoStride', + 'tmSciNoteCutoff', 'tmXBAutoPrecision', 'tmXBBorderOn', + 'tmXBDataLeftF', 'tmXBDataRightF', 'tmXBFormat', 'tmXBIrrTensionF', + 'tmXBIrregularPoints', 'tmXBLabelAngleF', 'tmXBLabelConstantSpacingF', + 'tmXBLabelDeltaF', 'tmXBLabelDirection', 'tmXBLabelFont', + 'tmXBLabelFontAspectF', 'tmXBLabelFontColor', 'tmXBLabelFontHeightF', + 'tmXBLabelFontQuality', 'tmXBLabelFontThicknessF', + 'tmXBLabelFuncCode', 'tmXBLabelJust', 'tmXBLabelStride', 'tmXBLabels', + 'tmXBLabelsOn', 'tmXBMajorLengthF', 'tmXBMajorLineColor', + 'tmXBMajorOutwardLengthF', 'tmXBMajorThicknessF', 'tmXBMaxLabelLenF', + 'tmXBMaxTicks', 'tmXBMinLabelSpacingF', 'tmXBMinorLengthF', + 'tmXBMinorLineColor', 'tmXBMinorOn', 'tmXBMinorOutwardLengthF', + 'tmXBMinorPerMajor', 'tmXBMinorThicknessF', 'tmXBMinorValues', + 'tmXBMode', 'tmXBOn', 'tmXBPrecision', 'tmXBStyle', 'tmXBTickEndF', + 'tmXBTickSpacingF', 'tmXBTickStartF', 'tmXBValues', 'tmXMajorGrid', + 'tmXMajorGridLineColor', 'tmXMajorGridLineDashPattern', + 'tmXMajorGridThicknessF', 'tmXMinorGrid', 'tmXMinorGridLineColor', + 'tmXMinorGridLineDashPattern', 'tmXMinorGridThicknessF', + 'tmXTAutoPrecision', 'tmXTBorderOn', 'tmXTDataLeftF', + 'tmXTDataRightF', 'tmXTFormat', 'tmXTIrrTensionF', + 'tmXTIrregularPoints', 'tmXTLabelAngleF', 'tmXTLabelConstantSpacingF', + 'tmXTLabelDeltaF', 'tmXTLabelDirection', 'tmXTLabelFont', + 'tmXTLabelFontAspectF', 'tmXTLabelFontColor', 'tmXTLabelFontHeightF', + 'tmXTLabelFontQuality', 'tmXTLabelFontThicknessF', + 'tmXTLabelFuncCode', 'tmXTLabelJust', 'tmXTLabelStride', 'tmXTLabels', + 'tmXTLabelsOn', 'tmXTMajorLengthF', 'tmXTMajorLineColor', + 'tmXTMajorOutwardLengthF', 'tmXTMajorThicknessF', 'tmXTMaxLabelLenF', + 'tmXTMaxTicks', 'tmXTMinLabelSpacingF', 'tmXTMinorLengthF', + 'tmXTMinorLineColor', 'tmXTMinorOn', 'tmXTMinorOutwardLengthF', + 'tmXTMinorPerMajor', 'tmXTMinorThicknessF', 'tmXTMinorValues', + 'tmXTMode', 'tmXTOn', 'tmXTPrecision', 'tmXTStyle', 'tmXTTickEndF', + 'tmXTTickSpacingF', 'tmXTTickStartF', 'tmXTValues', 'tmXUseBottom', + 'tmYLAutoPrecision', 'tmYLBorderOn', 'tmYLDataBottomF', + 'tmYLDataTopF', 'tmYLFormat', 'tmYLIrrTensionF', + 'tmYLIrregularPoints', 'tmYLLabelAngleF', 'tmYLLabelConstantSpacingF', + 'tmYLLabelDeltaF', 'tmYLLabelDirection', 'tmYLLabelFont', + 
'tmYLLabelFontAspectF', 'tmYLLabelFontColor', 'tmYLLabelFontHeightF', + 'tmYLLabelFontQuality', 'tmYLLabelFontThicknessF', + 'tmYLLabelFuncCode', 'tmYLLabelJust', 'tmYLLabelStride', 'tmYLLabels', + 'tmYLLabelsOn', 'tmYLMajorLengthF', 'tmYLMajorLineColor', + 'tmYLMajorOutwardLengthF', 'tmYLMajorThicknessF', 'tmYLMaxLabelLenF', + 'tmYLMaxTicks', 'tmYLMinLabelSpacingF', 'tmYLMinorLengthF', + 'tmYLMinorLineColor', 'tmYLMinorOn', 'tmYLMinorOutwardLengthF', + 'tmYLMinorPerMajor', 'tmYLMinorThicknessF', 'tmYLMinorValues', + 'tmYLMode', 'tmYLOn', 'tmYLPrecision', 'tmYLStyle', 'tmYLTickEndF', + 'tmYLTickSpacingF', 'tmYLTickStartF', 'tmYLValues', 'tmYMajorGrid', + 'tmYMajorGridLineColor', 'tmYMajorGridLineDashPattern', + 'tmYMajorGridThicknessF', 'tmYMinorGrid', 'tmYMinorGridLineColor', + 'tmYMinorGridLineDashPattern', 'tmYMinorGridThicknessF', + 'tmYRAutoPrecision', 'tmYRBorderOn', 'tmYRDataBottomF', + 'tmYRDataTopF', 'tmYRFormat', 'tmYRIrrTensionF', + 'tmYRIrregularPoints', 'tmYRLabelAngleF', 'tmYRLabelConstantSpacingF', + 'tmYRLabelDeltaF', 'tmYRLabelDirection', 'tmYRLabelFont', + 'tmYRLabelFontAspectF', 'tmYRLabelFontColor', 'tmYRLabelFontHeightF', + 'tmYRLabelFontQuality', 'tmYRLabelFontThicknessF', + 'tmYRLabelFuncCode', 'tmYRLabelJust', 'tmYRLabelStride', 'tmYRLabels', + 'tmYRLabelsOn', 'tmYRMajorLengthF', 'tmYRMajorLineColor', + 'tmYRMajorOutwardLengthF', 'tmYRMajorThicknessF', 'tmYRMaxLabelLenF', + 'tmYRMaxTicks', 'tmYRMinLabelSpacingF', 'tmYRMinorLengthF', + 'tmYRMinorLineColor', 'tmYRMinorOn', 'tmYRMinorOutwardLengthF', + 'tmYRMinorPerMajor', 'tmYRMinorThicknessF', 'tmYRMinorValues', + 'tmYRMode', 'tmYROn', 'tmYRPrecision', 'tmYRStyle', 'tmYRTickEndF', + 'tmYRTickSpacingF', 'tmYRTickStartF', 'tmYRValues', 'tmYUseLeft', + 'trGridType', 'trLineInterpolationOn', + 'trXAxisType', 'trXCoordPoints', 'trXInterPoints', 'trXLog', + 'trXMaxF', 'trXMinF', 'trXReverse', 'trXSamples', 'trXTensionF', + 'trYAxisType', 'trYCoordPoints', 'trYInterPoints', 'trYLog', + 'trYMaxF', 'trYMinF', 'trYReverse', 'trYSamples', 'trYTensionF', + 'txAngleF', 'txBackgroundFillColor', 'txConstantSpacingF', 'txDirection', + 'txFont', 'HLU-Fonts', 'txFontAspectF', 'txFontColor', + 'txFontHeightF', 'txFontOpacityF', 'txFontQuality', + 'txFontThicknessF', 'txFuncCode', 'txJust', 'txPerimColor', + 'txPerimDashLengthF', 'txPerimDashPattern', 'txPerimOn', + 'txPerimSpaceF', 'txPerimThicknessF', 'txPosXF', 'txPosYF', + 'txString', 'vcExplicitLabelBarLabelsOn', 'vcFillArrowEdgeColor', + 'vcFillArrowEdgeThicknessF', 'vcFillArrowFillColor', + 'vcFillArrowHeadInteriorXF', 'vcFillArrowHeadMinFracXF', + 'vcFillArrowHeadMinFracYF', 'vcFillArrowHeadXF', 'vcFillArrowHeadYF', + 'vcFillArrowMinFracWidthF', 'vcFillArrowWidthF', 'vcFillArrowsOn', + 'vcFillOverEdge', 'vcGlyphOpacityF', 'vcGlyphStyle', + 'vcLabelBarEndLabelsOn', 'vcLabelFontColor', 'vcLabelFontHeightF', + 'vcLabelsOn', 'vcLabelsUseVectorColor', 'vcLevelColors', + 'vcLevelCount', 'vcLevelPalette', 'vcLevelSelectionMode', + 'vcLevelSpacingF', 'vcLevels', 'vcLineArrowColor', + 'vcLineArrowHeadMaxSizeF', 'vcLineArrowHeadMinSizeF', + 'vcLineArrowThicknessF', 'vcMagnitudeFormat', + 'vcMagnitudeScaleFactorF', 'vcMagnitudeScaleValueF', + 'vcMagnitudeScalingMode', 'vcMapDirection', 'vcMaxLevelCount', + 'vcMaxLevelValF', 'vcMaxMagnitudeF', 'vcMinAnnoAngleF', + 'vcMinAnnoArrowAngleF', 'vcMinAnnoArrowEdgeColor', + 'vcMinAnnoArrowFillColor', 'vcMinAnnoArrowLineColor', + 'vcMinAnnoArrowMinOffsetF', 'vcMinAnnoArrowSpaceF', + 'vcMinAnnoArrowUseVecColor', 'vcMinAnnoBackgroundColor', + 
'vcMinAnnoConstantSpacingF', 'vcMinAnnoExplicitMagnitudeF', + 'vcMinAnnoFont', 'vcMinAnnoFontAspectF', 'vcMinAnnoFontColor', + 'vcMinAnnoFontHeightF', 'vcMinAnnoFontQuality', + 'vcMinAnnoFontThicknessF', 'vcMinAnnoFuncCode', 'vcMinAnnoJust', + 'vcMinAnnoOn', 'vcMinAnnoOrientation', 'vcMinAnnoOrthogonalPosF', + 'vcMinAnnoParallelPosF', 'vcMinAnnoPerimColor', 'vcMinAnnoPerimOn', + 'vcMinAnnoPerimSpaceF', 'vcMinAnnoPerimThicknessF', 'vcMinAnnoSide', + 'vcMinAnnoString1', 'vcMinAnnoString1On', 'vcMinAnnoString2', + 'vcMinAnnoString2On', 'vcMinAnnoTextDirection', 'vcMinAnnoZone', + 'vcMinDistanceF', 'vcMinFracLengthF', 'vcMinLevelValF', + 'vcMinMagnitudeF', 'vcMonoFillArrowEdgeColor', + 'vcMonoFillArrowFillColor', 'vcMonoLineArrowColor', + 'vcMonoWindBarbColor', 'vcNoDataLabelOn', 'vcNoDataLabelString', + 'vcPositionMode', 'vcRefAnnoAngleF', 'vcRefAnnoArrowAngleF', + 'vcRefAnnoArrowEdgeColor', 'vcRefAnnoArrowFillColor', + 'vcRefAnnoArrowLineColor', 'vcRefAnnoArrowMinOffsetF', + 'vcRefAnnoArrowSpaceF', 'vcRefAnnoArrowUseVecColor', + 'vcRefAnnoBackgroundColor', 'vcRefAnnoConstantSpacingF', + 'vcRefAnnoExplicitMagnitudeF', 'vcRefAnnoFont', + 'vcRefAnnoFontAspectF', 'vcRefAnnoFontColor', 'vcRefAnnoFontHeightF', + 'vcRefAnnoFontQuality', 'vcRefAnnoFontThicknessF', + 'vcRefAnnoFuncCode', 'vcRefAnnoJust', 'vcRefAnnoOn', + 'vcRefAnnoOrientation', 'vcRefAnnoOrthogonalPosF', + 'vcRefAnnoParallelPosF', 'vcRefAnnoPerimColor', 'vcRefAnnoPerimOn', + 'vcRefAnnoPerimSpaceF', 'vcRefAnnoPerimThicknessF', 'vcRefAnnoSide', + 'vcRefAnnoString1', 'vcRefAnnoString1On', 'vcRefAnnoString2', + 'vcRefAnnoString2On', 'vcRefAnnoTextDirection', 'vcRefAnnoZone', + 'vcRefLengthF', 'vcRefMagnitudeF', 'vcScalarFieldData', + 'vcScalarMissingValColor', 'vcScalarValueFormat', + 'vcScalarValueScaleFactorF', 'vcScalarValueScaleValueF', + 'vcScalarValueScalingMode', 'vcSpanLevelPalette', 'vcUseRefAnnoRes', + 'vcUseScalarArray', 'vcVectorDrawOrder', 'vcVectorFieldData', + 'vcWindBarbCalmCircleSizeF', 'vcWindBarbColor', + 'vcWindBarbLineThicknessF', 'vcWindBarbScaleFactorF', + 'vcWindBarbTickAngleF', 'vcWindBarbTickLengthF', + 'vcWindBarbTickSpacingF', 'vcZeroFLabelAngleF', + 'vcZeroFLabelBackgroundColor', 'vcZeroFLabelConstantSpacingF', + 'vcZeroFLabelFont', 'vcZeroFLabelFontAspectF', + 'vcZeroFLabelFontColor', 'vcZeroFLabelFontHeightF', + 'vcZeroFLabelFontQuality', 'vcZeroFLabelFontThicknessF', + 'vcZeroFLabelFuncCode', 'vcZeroFLabelJust', 'vcZeroFLabelOn', + 'vcZeroFLabelOrthogonalPosF', 'vcZeroFLabelParallelPosF', + 'vcZeroFLabelPerimColor', 'vcZeroFLabelPerimOn', + 'vcZeroFLabelPerimSpaceF', 'vcZeroFLabelPerimThicknessF', + 'vcZeroFLabelSide', 'vcZeroFLabelString', 'vcZeroFLabelTextDirection', + 'vcZeroFLabelZone', 'vfCopyData', 'vfDataArray', + 'vfExchangeDimensions', 'vfExchangeUVData', 'vfMagMaxV', 'vfMagMinV', + 'vfMissingUValueV', 'vfMissingVValueV', 'vfPolarData', + 'vfSingleMissingValue', 'vfUDataArray', 'vfUMaxV', 'vfUMinV', + 'vfVDataArray', 'vfVMaxV', 'vfVMinV', 'vfXArray', 'vfXCActualEndF', + 'vfXCActualStartF', 'vfXCEndIndex', 'vfXCEndSubsetV', 'vfXCEndV', + 'vfXCStartIndex', 'vfXCStartSubsetV', 'vfXCStartV', 'vfXCStride', + 'vfYArray', 'vfYCActualEndF', 'vfYCActualStartF', 'vfYCEndIndex', + 'vfYCEndSubsetV', 'vfYCEndV', 'vfYCStartIndex', 'vfYCStartSubsetV', + 'vfYCStartV', 'vfYCStride', 'vpAnnoManagerId', 'vpClipOn', + 'vpHeightF', 'vpKeepAspect', 'vpOn', 'vpUseSegments', 'vpWidthF', + 'vpXF', 'vpYF', 'wkAntiAlias', 'wkBackgroundColor', 'wkBackgroundOpacityF', + 'wkColorMapLen', 'wkColorMap', 'wkColorModel', 
'wkDashTableLength',
+            'wkDefGraphicStyleId', 'wkDeviceLowerX', 'wkDeviceLowerY',
+            'wkDeviceUpperX', 'wkDeviceUpperY', 'wkFileName', 'wkFillTableLength',
+            'wkForegroundColor', 'wkFormat', 'wkFullBackground', 'wkGksWorkId',
+            'wkHeight', 'wkMarkerTableLength', 'wkMetaName', 'wkOrientation',
+            'wkPDFFileName', 'wkPDFFormat', 'wkPDFResolution', 'wkPSFileName',
+            'wkPSFormat', 'wkPSResolution', 'wkPaperHeightF', 'wkPaperSize',
+            'wkPaperWidthF', 'wkPause', 'wkTopLevelViews', 'wkViews',
+            'wkVisualType', 'wkWidth', 'wkWindowId', 'wkXColorMode', 'wsCurrentSize',
+            'wsMaximumSize', 'wsThresholdSize', 'xyComputeXMax',
+            'xyComputeXMin', 'xyComputeYMax', 'xyComputeYMin', 'xyCoordData',
+            'xyCoordDataSpec', 'xyCurveDrawOrder', 'xyDashPattern',
+            'xyDashPatterns', 'xyExplicitLabels', 'xyExplicitLegendLabels',
+            'xyLabelMode', 'xyLineColor', 'xyLineColors', 'xyLineDashSegLenF',
+            'xyLineLabelConstantSpacingF', 'xyLineLabelFont',
+            'xyLineLabelFontAspectF', 'xyLineLabelFontColor',
+            'xyLineLabelFontColors', 'xyLineLabelFontHeightF',
+            'xyLineLabelFontQuality', 'xyLineLabelFontThicknessF',
+            'xyLineLabelFuncCode', 'xyLineThicknessF', 'xyLineThicknesses',
+            'xyMarkLineMode', 'xyMarkLineModes', 'xyMarker', 'xyMarkerColor',
+            'xyMarkerColors', 'xyMarkerSizeF', 'xyMarkerSizes',
+            'xyMarkerThicknessF', 'xyMarkerThicknesses', 'xyMarkers',
+            'xyMonoDashPattern', 'xyMonoLineColor', 'xyMonoLineLabelFontColor',
+            'xyMonoLineThickness', 'xyMonoMarkLineMode', 'xyMonoMarker',
+            'xyMonoMarkerColor', 'xyMonoMarkerSize', 'xyMonoMarkerThickness',
+            'xyXIrrTensionF', 'xyXIrregularPoints', 'xyXStyle', 'xyYIrrTensionF',
+            'xyYIrregularPoints', 'xyYStyle'), prefix=r'\b'),
+            Name.Builtin),
+
+            # Booleans
+            (r'\.(True|False)\.', Name.Builtin),
+            # Comparing Operators
+            (r'\.(eq|ne|lt|le|gt|ge|not|and|or|xor)\.', Operator.Word),
+        ],
+
+        'strings': [
+            (r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
+        ],
+
+        'nums': [
+            (r'\d+(?![.e])(_[a-z]\w+)?', Number.Integer),
+            (r'[+-]?\d*\.\d+(e[-+]?\d+)?(_[a-z]\w+)?', Number.Float),
+            (r'[+-]?\d+\.\d*(e[-+]?\d+)?(_[a-z]\w+)?', Number.Float),
+        ],
+    }
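
A minimal usage sketch for the NCLLexer defined above, assuming Pygments 2.7
with this module installed; the short NCL fragment and its variable are
illustrative only:

    from pygments import highlight
    from pygments.lexers.ncl import NCLLexer
    from pygments.formatters import TerminalFormatter

    # A tiny NCL fragment: 'begin'/'end' hit the Statements rule, 'sin' the
    # Intrinsics rule, 0.5 the 'nums' float rule, and ';' starts a comment.
    ncl_src = 'begin\n  x = sin(0.5)  ; half a radian\nend\n'

    # highlight() runs the lexer over the source and renders the resulting
    # token stream with the chosen formatter.
    print(highlight(ncl_src, NCLLexer(), TerminalFormatter()))
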
versionadded:: 1.5 - """ - - name = 'Nimrod' - aliases = ['nim', 'nimrod'] - filenames = ['*.nim', '*.nimrod'] - mimetypes = ['text/x-nim'] - - flags = re.MULTILINE | re.IGNORECASE | re.UNICODE - - def underscorize(words): - newWords = [] - new = "" - for word in words: - for ch in word: - new += (ch + "_?") - newWords.append(new) - new = "" - return "|".join(newWords) - - keywords = [ - 'addr', 'and', 'as', 'asm', 'atomic', 'bind', 'block', 'break', 'case', - 'cast', 'concept', 'const', 'continue', 'converter', 'defer', 'discard', - 'distinct', 'div', 'do', 'elif', 'else', 'end', 'enum', 'except', - 'export', 'finally', 'for', 'func', 'if', 'in', 'yield', 'interface', - 'is', 'isnot', 'iterator', 'let', 'macro', 'method', 'mixin', 'mod', - 'not', 'notin', 'object', 'of', 'or', 'out', 'proc', 'ptr', 'raise', - 'ref', 'return', 'shared', 'shl', 'shr', 'static', 'template', 'try', - 'tuple', 'type', 'when', 'while', 'with', 'without', 'xor' - ] - - keywordsPseudo = [ - 'nil', 'true', 'false' - ] - - opWords = [ - 'and', 'or', 'not', 'xor', 'shl', 'shr', 'div', 'mod', 'in', - 'notin', 'is', 'isnot' - ] - - types = [ - 'int', 'int8', 'int16', 'int32', 'int64', 'float', 'float32', 'float64', - 'bool', 'char', 'range', 'array', 'seq', 'set', 'string' - ] - - tokens = { - 'root': [ - (r'##.*$', String.Doc), - (r'#.*$', Comment), - (r'[*=><+\-/@$~&%!?|\\\[\]]', Operator), - (r'\.\.|\.|,|\[\.|\.\]|\{\.|\.\}|\(\.|\.\)|\{|\}|\(|\)|:|\^|`|;', - Punctuation), - - # Strings - (r'(?:[\w]+)"', String, 'rdqs'), - (r'"""', String, 'tdqs'), - ('"', String, 'dqs'), - - # Char - ("'", String.Char, 'chars'), - - # Keywords - (r'(%s)\b' % underscorize(opWords), Operator.Word), - (r'(p_?r_?o_?c_?\s)(?![(\[\]])', Keyword, 'funcname'), - (r'(%s)\b' % underscorize(keywords), Keyword), - (r'(%s)\b' % underscorize(['from', 'import', 'include']), - Keyword.Namespace), - (r'(v_?a_?r)\b', Keyword.Declaration), - (r'(%s)\b' % underscorize(types), Keyword.Type), - (r'(%s)\b' % underscorize(keywordsPseudo), Keyword.Pseudo), - # Identifiers - (r'\b((?![_\d])\w)(((?!_)\w)|(_(?!_)\w))*', Name), - # Numbers - (r'[0-9][0-9_]*(?=([e.]|\'f(32|64)))', - Number.Float, ('float-suffix', 'float-number')), - (r'0x[a-f0-9][a-f0-9_]*', Number.Hex, 'int-suffix'), - (r'0b[01][01_]*', Number.Bin, 'int-suffix'), - (r'0o[0-7][0-7_]*', Number.Oct, 'int-suffix'), - (r'[0-9][0-9_]*', Number.Integer, 'int-suffix'), - # Whitespace - (r'\s+', Text), - (r'.+$', Error), - ], - 'chars': [ - (r'\\([\\abcefnrtvl"\']|x[a-f0-9]{2}|[0-9]{1,3})', String.Escape), - (r"'", String.Char, '#pop'), - (r".", String.Char) - ], - 'strings': [ - (r'(?`_ source code. + + .. 
versionadded:: 1.5 + """ + + name = 'Nimrod' + aliases = ['nim', 'nimrod'] + filenames = ['*.nim', '*.nimrod'] + mimetypes = ['text/x-nim'] + + flags = re.MULTILINE | re.IGNORECASE | re.UNICODE + + def underscorize(words): + newWords = [] + new = "" + for word in words: + for ch in word: + new += (ch + "_?") + newWords.append(new) + new = "" + return "|".join(newWords) + + keywords = [ + 'addr', 'and', 'as', 'asm', 'bind', 'block', 'break', 'case', + 'cast', 'concept', 'const', 'continue', 'converter', 'defer', 'discard', + 'distinct', 'div', 'do', 'elif', 'else', 'end', 'enum', 'except', + 'export', 'finally', 'for', 'func', 'if', 'in', 'yield', 'interface', + 'is', 'isnot', 'iterator', 'let', 'macro', 'method', 'mixin', 'mod', + 'not', 'notin', 'object', 'of', 'or', 'out', 'proc', 'ptr', 'raise', + 'ref', 'return', 'shl', 'shr', 'static', 'template', 'try', + 'tuple', 'type', 'using', 'when', 'while', 'xor' + ] + + keywordsPseudo = [ + 'nil', 'true', 'false' + ] + + opWords = [ + 'and', 'or', 'not', 'xor', 'shl', 'shr', 'div', 'mod', 'in', + 'notin', 'is', 'isnot' + ] + + types = [ + 'int', 'int8', 'int16', 'int32', 'int64', 'float', 'float32', 'float64', + 'bool', 'char', 'range', 'array', 'seq', 'set', 'string' + ] + + tokens = { + 'root': [ + (r'##.*$', String.Doc), + (r'#.*$', Comment), + (r'[*=><+\-/@$~&%!?|\\\[\]]', Operator), + (r'\.\.|\.|,|\[\.|\.\]|\{\.|\.\}|\(\.|\.\)|\{|\}|\(|\)|:|\^|`|;', + Punctuation), + + # Strings + (r'(?:[\w]+)"', String, 'rdqs'), + (r'"""', String, 'tdqs'), + ('"', String, 'dqs'), + + # Char + ("'", String.Char, 'chars'), + + # Keywords + (r'(%s)\b' % underscorize(opWords), Operator.Word), + (r'(p_?r_?o_?c_?\s)(?![(\[\]])', Keyword, 'funcname'), + (r'(%s)\b' % underscorize(keywords), Keyword), + (r'(%s)\b' % underscorize(['from', 'import', 'include']), + Keyword.Namespace), + (r'(v_?a_?r)\b', Keyword.Declaration), + (r'(%s)\b' % underscorize(types), Keyword.Type), + (r'(%s)\b' % underscorize(keywordsPseudo), Keyword.Pseudo), + # Identifiers + (r'\b((?![_\d])\w)(((?!_)\w)|(_(?!_)\w))*', Name), + # Numbers + (r'[0-9][0-9_]*(?=([e.]|\'f(32|64)))', + Number.Float, ('float-suffix', 'float-number')), + (r'0x[a-f0-9][a-f0-9_]*', Number.Hex, 'int-suffix'), + (r'0b[01][01_]*', Number.Bin, 'int-suffix'), + (r'0o[0-7][0-7_]*', Number.Oct, 'int-suffix'), + (r'[0-9][0-9_]*', Number.Integer, 'int-suffix'), + # Whitespace + (r'\s+', Text), + (r'.+$', Error), + ], + 'chars': [ + (r'\\([\\abcefnrtvl"\']|x[a-f0-9]{2}|[0-9]{1,3})', String.Escape), + (r"'", String.Char, '#pop'), + (r".", String.Char) + ], + 'strings': [ + (r'(?`_ source. - - .. 
versionadded:: 2.0 - """ - - name = 'Nit' - aliases = ['nit'] - filenames = ['*.nit'] - tokens = { - 'root': [ - (r'#.*?$', Comment.Single), - (words(( - 'package', 'module', 'import', 'class', 'abstract', 'interface', - 'universal', 'enum', 'end', 'fun', 'type', 'init', 'redef', - 'isa', 'do', 'readable', 'writable', 'var', 'intern', 'extern', - 'public', 'protected', 'private', 'intrude', 'if', 'then', - 'else', 'while', 'loop', 'for', 'in', 'and', 'or', 'not', - 'implies', 'return', 'continue', 'break', 'abort', 'assert', - 'new', 'is', 'once', 'super', 'self', 'true', 'false', 'nullable', - 'null', 'as', 'isset', 'label', '__debug__'), suffix=r'(?=[\r\n\t( ])'), - Keyword), - (r'[A-Z]\w*', Name.Class), - (r'"""(([^\'\\]|\\.)|\\r|\\n)*((\{\{?)?(""?\{\{?)*""""*)', String), # Simple long string - (r'\'\'\'(((\\.|[^\'\\])|\\r|\\n)|\'((\\.|[^\'\\])|\\r|\\n)|' - r'\'\'((\\.|[^\'\\])|\\r|\\n))*\'\'\'', String), # Simple long string alt - (r'"""(([^\'\\]|\\.)|\\r|\\n)*((""?)?(\{\{?""?)*\{\{\{\{*)', String), # Start long string - (r'\}\}\}(((\\.|[^\'\\])|\\r|\\n))*(""?)?(\{\{?""?)*\{\{\{\{*', String), # Mid long string - (r'\}\}\}(((\\.|[^\'\\])|\\r|\\n))*(\{\{?)?(""?\{\{?)*""""*', String), # End long string - (r'"(\\.|([^"}{\\]))*"', String), # Simple String - (r'"(\\.|([^"}{\\]))*\{', String), # Start string - (r'\}(\\.|([^"}{\\]))*\{', String), # Mid String - (r'\}(\\.|([^"}{\\]))*"', String), # End String - (r'(\'[^\'\\]\')|(\'\\.\')', String.Char), - (r'[0-9]+', Number.Integer), - (r'[0-9]*.[0-9]+', Number.Float), - (r'0(x|X)[0-9A-Fa-f]+', Number.Hex), - (r'[a-z]\w*', Name), - (r'_\w+', Name.Variable.Instance), - (r'==|!=|<==>|>=|>>|>|<=|<<|<|\+|-|=|/|\*|%|\+=|-=|!|@', Operator), - (r'\(|\)|\[|\]|,|\.\.\.|\.\.|\.|::|:', Punctuation), - (r'`\{[^`]*`\}', Text), # Extern blocks won't be Lexed by Nit - (r'[\r\n\t ]+', Text), - ], - } +# -*- coding: utf-8 -*- +""" + pygments.lexers.nit + ~~~~~~~~~~~~~~~~~~~ + + Lexer for the Nit language. + + :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +__all__ = ['NitLexer'] + + +class NitLexer(RegexLexer): + """ + For `nit `_ source. + + .. 
versionadded:: 2.0 + """ + + name = 'Nit' + aliases = ['nit'] + filenames = ['*.nit'] + tokens = { + 'root': [ + (r'#.*?$', Comment.Single), + (words(( + 'package', 'module', 'import', 'class', 'abstract', 'interface', + 'universal', 'enum', 'end', 'fun', 'type', 'init', 'redef', + 'isa', 'do', 'readable', 'writable', 'var', 'intern', 'extern', + 'public', 'protected', 'private', 'intrude', 'if', 'then', + 'else', 'while', 'loop', 'for', 'in', 'and', 'or', 'not', + 'implies', 'return', 'continue', 'break', 'abort', 'assert', + 'new', 'is', 'once', 'super', 'self', 'true', 'false', 'nullable', + 'null', 'as', 'isset', 'label', '__debug__'), suffix=r'(?=[\r\n\t( ])'), + Keyword), + (r'[A-Z]\w*', Name.Class), + (r'"""(([^\'\\]|\\.)|\\r|\\n)*((\{\{?)?(""?\{\{?)*""""*)', String), # Simple long string + (r'\'\'\'(((\\.|[^\'\\])|\\r|\\n)|\'((\\.|[^\'\\])|\\r|\\n)|' + r'\'\'((\\.|[^\'\\])|\\r|\\n))*\'\'\'', String), # Simple long string alt + (r'"""(([^\'\\]|\\.)|\\r|\\n)*((""?)?(\{\{?""?)*\{\{\{\{*)', String), # Start long string + (r'\}\}\}(((\\.|[^\'\\])|\\r|\\n))*(""?)?(\{\{?""?)*\{\{\{\{*', String), # Mid long string + (r'\}\}\}(((\\.|[^\'\\])|\\r|\\n))*(\{\{?)?(""?\{\{?)*""""*', String), # End long string + (r'"(\\.|([^"}{\\]))*"', String), # Simple String + (r'"(\\.|([^"}{\\]))*\{', String), # Start string + (r'\}(\\.|([^"}{\\]))*\{', String), # Mid String + (r'\}(\\.|([^"}{\\]))*"', String), # End String + (r'(\'[^\'\\]\')|(\'\\.\')', String.Char), + (r'[0-9]+', Number.Integer), + (r'[0-9]*.[0-9]+', Number.Float), + (r'0(x|X)[0-9A-Fa-f]+', Number.Hex), + (r'[a-z]\w*', Name), + (r'_\w+', Name.Variable.Instance), + (r'==|!=|<==>|>=|>>|>|<=|<<|<|\+|-|=|/|\*|%|\+=|-=|!|@', Operator), + (r'\(|\)|\[|\]|,|\.\.\.|\.\.|\.|::|:', Punctuation), + (r'`\{[^`]*`\}', Text), # Extern blocks won't be Lexed by Nit + (r'[\r\n\t ]+', Text), + ], + } diff --git a/pygments/lexers/nix.py b/pygments/lexers/nix.py old mode 100644 new mode 100755 index 50210c4..dbe1fae --- a/pygments/lexers/nix.py +++ b/pygments/lexers/nix.py @@ -1,136 +1,136 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.nix - ~~~~~~~~~~~~~~~~~~~ - - Lexers for the NixOS Nix language. - - :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -import re - -from pygments.lexer import RegexLexer, include -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ - Number, Punctuation, Literal - -__all__ = ['NixLexer'] - - -class NixLexer(RegexLexer): - """ - For the `Nix language `_. - - .. 
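
Worth noting about the Nit string rules above: `"..."` literals can be interrupted by `{ ... }` interpolations, which is why the grammar carries separate "Start string", "Mid String" and "End String" patterns instead of a single rule. To see what any of these lexers actually emit, a short inspection loop (assuming a normal Pygments install):

    from pygments.lexers.nit import NitLexer

    for token, value in NitLexer().get_tokens('print "hello {name}!"'):
        print(token, repr(value))
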
versionadded:: 2.0 - """ - - name = 'Nix' - aliases = ['nixos', 'nix'] - filenames = ['*.nix'] - mimetypes = ['text/x-nix'] - - flags = re.MULTILINE | re.UNICODE - - keywords = ['rec', 'with', 'let', 'in', 'inherit', 'assert', 'if', - 'else', 'then', '...'] - builtins = ['import', 'abort', 'baseNameOf', 'dirOf', 'isNull', 'builtins', - 'map', 'removeAttrs', 'throw', 'toString', 'derivation'] - operators = ['++', '+', '?', '.', '!', '//', '==', - '!=', '&&', '||', '->', '='] - - punctuations = ["(", ")", "[", "]", ";", "{", "}", ":", ",", "@"] - - tokens = { - 'root': [ - # comments starting with # - (r'#.*$', Comment.Single), - - # multiline comments - (r'/\*', Comment.Multiline, 'comment'), - - # whitespace - (r'\s+', Text), - - # keywords - ('(%s)' % '|'.join(re.escape(entry) + '\\b' for entry in keywords), Keyword), - - # highlight the builtins - ('(%s)' % '|'.join(re.escape(entry) + '\\b' for entry in builtins), - Name.Builtin), - - (r'\b(true|false|null)\b', Name.Constant), - - # operators - ('(%s)' % '|'.join(re.escape(entry) for entry in operators), - Operator), - - # word operators - (r'\b(or|and)\b', Operator.Word), - - # punctuations - ('(%s)' % '|'.join(re.escape(entry) for entry in punctuations), Punctuation), - - # integers - (r'[0-9]+', Number.Integer), - - # strings - (r'"', String.Double, 'doublequote'), - (r"''", String.Single, 'singlequote'), - - # paths - (r'[\w.+-]*(\/[\w.+-]+)+', Literal), - (r'\<[\w.+-]+(\/[\w.+-]+)*\>', Literal), - - # urls - (r'[a-zA-Z][a-zA-Z0-9\+\-\.]*\:[\w%/?:@&=+$,\\.!~*\'-]+', Literal), - - # names of variables - (r'[\w-]+\s*=', String.Symbol), - (r'[a-zA-Z_][\w\'-]*', Text), - - ], - 'comment': [ - (r'[^/*]+', Comment.Multiline), - (r'/\*', Comment.Multiline, '#push'), - (r'\*/', Comment.Multiline, '#pop'), - (r'[*/]', Comment.Multiline), - ], - 'singlequote': [ - (r"'''", String.Escape), - (r"''\$\{", String.Escape), - (r"''\n", String.Escape), - (r"''\r", String.Escape), - (r"''\t", String.Escape), - (r"''", String.Single, '#pop'), - (r'\$\{', String.Interpol, 'antiquote'), - (r"[^']", String.Single), - ], - 'doublequote': [ - (r'\\', String.Escape), - (r'\\"', String.Escape), - (r'\\$\{', String.Escape), - (r'"', String.Double, '#pop'), - (r'\$\{', String.Interpol, 'antiquote'), - (r'[^"]', String.Double), - ], - 'antiquote': [ - (r"\}", String.Interpol, '#pop'), - # TODO: we should probably escape also here ''${ \${ - (r"\$\{", String.Interpol, '#push'), - include('root'), - ], - } - - def analyse_text(text): - rv = 0.0 - # TODO: let/in - if re.search(r'import.+?<[^>]+>', text): - rv += 0.4 - if re.search(r'mkDerivation\s+(\(|\{|rec)', text): - rv += 0.4 - if re.search(r'=\s+mkIf\s+', text): - rv += 0.4 - if re.search(r'\{[a-zA-Z,\s]+\}:', text): - rv += 0.1 - return rv +# -*- coding: utf-8 -*- +""" + pygments.lexers.nix + ~~~~~~~~~~~~~~~~~~~ + + Lexers for the NixOS Nix language. + + :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, include +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Literal + +__all__ = ['NixLexer'] + + +class NixLexer(RegexLexer): + """ + For the `Nix language `_. + + .. 
versionadded:: 2.0 + """ + + name = 'Nix' + aliases = ['nixos', 'nix'] + filenames = ['*.nix'] + mimetypes = ['text/x-nix'] + + flags = re.MULTILINE | re.UNICODE + + keywords = ['rec', 'with', 'let', 'in', 'inherit', 'assert', 'if', + 'else', 'then', '...'] + builtins = ['import', 'abort', 'baseNameOf', 'dirOf', 'isNull', 'builtins', + 'map', 'removeAttrs', 'throw', 'toString', 'derivation'] + operators = ['++', '+', '?', '.', '!', '//', '==', + '!=', '&&', '||', '->', '='] + + punctuations = ["(", ")", "[", "]", ";", "{", "}", ":", ",", "@"] + + tokens = { + 'root': [ + # comments starting with # + (r'#.*$', Comment.Single), + + # multiline comments + (r'/\*', Comment.Multiline, 'comment'), + + # whitespace + (r'\s+', Text), + + # keywords + ('(%s)' % '|'.join(re.escape(entry) + '\\b' for entry in keywords), Keyword), + + # highlight the builtins + ('(%s)' % '|'.join(re.escape(entry) + '\\b' for entry in builtins), + Name.Builtin), + + (r'\b(true|false|null)\b', Name.Constant), + + # operators + ('(%s)' % '|'.join(re.escape(entry) for entry in operators), + Operator), + + # word operators + (r'\b(or|and)\b', Operator.Word), + + # punctuations + ('(%s)' % '|'.join(re.escape(entry) for entry in punctuations), Punctuation), + + # integers + (r'[0-9]+', Number.Integer), + + # strings + (r'"', String.Double, 'doublequote'), + (r"''", String.Single, 'singlequote'), + + # paths + (r'[\w.+-]*(\/[\w.+-]+)+', Literal), + (r'\<[\w.+-]+(\/[\w.+-]+)*\>', Literal), + + # urls + (r'[a-zA-Z][a-zA-Z0-9\+\-\.]*\:[\w%/?:@&=+$,\\.!~*\'-]+', Literal), + + # names of variables + (r'[\w-]+\s*=', String.Symbol), + (r'[a-zA-Z_][\w\'-]*', Text), + + ], + 'comment': [ + (r'[^/*]+', Comment.Multiline), + (r'/\*', Comment.Multiline, '#push'), + (r'\*/', Comment.Multiline, '#pop'), + (r'[*/]', Comment.Multiline), + ], + 'singlequote': [ + (r"'''", String.Escape), + (r"''\$\{", String.Escape), + (r"''\n", String.Escape), + (r"''\r", String.Escape), + (r"''\t", String.Escape), + (r"''", String.Single, '#pop'), + (r'\$\{', String.Interpol, 'antiquote'), + (r"[^']", String.Single), + ], + 'doublequote': [ + (r'\\', String.Escape), + (r'\\"', String.Escape), + (r'\\$\{', String.Escape), + (r'"', String.Double, '#pop'), + (r'\$\{', String.Interpol, 'antiquote'), + (r'[^"]', String.Double), + ], + 'antiquote': [ + (r"\}", String.Interpol, '#pop'), + # TODO: we should probably escape also here ''${ \${ + (r"\$\{", String.Interpol, '#push'), + include('root'), + ], + } + + def analyse_text(text): + rv = 0.0 + # TODO: let/in + if re.search(r'import.+?<[^>]+>', text): + rv += 0.4 + if re.search(r'mkDerivation\s+(\(|\{|rec)', text): + rv += 0.4 + if re.search(r'=\s+mkIf\s+', text): + rv += 0.4 + if re.search(r'\{[a-zA-Z,\s]+\}:', text): + rv += 0.1 + return rv diff --git a/pygments/lexers/oberon.py b/pygments/lexers/oberon.py old mode 100644 new mode 100755 index 1c18488..df51b10 --- a/pygments/lexers/oberon.py +++ b/pygments/lexers/oberon.py @@ -1,105 +1,105 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.oberon - ~~~~~~~~~~~~~~~~~~~~~~ - - Lexers for Oberon family languages. - - :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -import re - -from pygments.lexer import RegexLexer, include, words -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ - Number, Punctuation - -__all__ = ['ComponentPascalLexer'] - - -class ComponentPascalLexer(RegexLexer): - """ - For `Component Pascal `_ source code. - - .. 
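
The `analyse_text` hook at the end of NixLexer returns a confidence score between 0.0 and 1.0; `pygments.lexers.guess_lexer` runs the hook of every registered lexer over the text and picks the strongest match, which is why idioms such as `mkDerivation` and `{ ... }:` argument sets bump the score. A hedged sketch:

    from pygments.lexers import guess_lexer

    snippet = '''
    { stdenv }:
    stdenv.mkDerivation rec {
      name = "demo";
    }
    '''
    # Likely reports "Nix", given the scoring rules above.
    print(guess_lexer(snippet).name)
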
versionadded:: 2.1 - """ - name = 'Component Pascal' - aliases = ['componentpascal', 'cp'] - filenames = ['*.cp', '*.cps'] - mimetypes = ['text/x-component-pascal'] - - flags = re.MULTILINE | re.DOTALL - - tokens = { - 'root': [ - include('whitespace'), - include('comments'), - include('punctuation'), - include('numliterals'), - include('strings'), - include('operators'), - include('builtins'), - include('identifiers'), - ], - 'whitespace': [ - (r'\n+', Text), # blank lines - (r'\s+', Text), # whitespace - ], - 'comments': [ - (r'\(\*([^$].*?)\*\)', Comment.Multiline), - # TODO: nested comments (* (* ... *) ... (* ... *) *) not supported! - ], - 'punctuation': [ - (r'[()\[\]{},.:;|]', Punctuation), - ], - 'numliterals': [ - (r'[0-9A-F]+X\b', Number.Hex), # char code - (r'[0-9A-F]+[HL]\b', Number.Hex), # hexadecimal number - (r'[0-9]+\.[0-9]+E[+-][0-9]+', Number.Float), # real number - (r'[0-9]+\.[0-9]+', Number.Float), # real number - (r'[0-9]+', Number.Integer), # decimal whole number - ], - 'strings': [ - (r"'[^\n']*'", String), # single quoted string - (r'"[^\n"]*"', String), # double quoted string - ], - 'operators': [ - # Arithmetic Operators - (r'[+-]', Operator), - (r'[*/]', Operator), - # Relational Operators - (r'[=#<>]', Operator), - # Dereferencing Operator - (r'\^', Operator), - # Logical AND Operator - (r'&', Operator), - # Logical NOT Operator - (r'~', Operator), - # Assignment Symbol - (r':=', Operator), - # Range Constructor - (r'\.\.', Operator), - (r'\$', Operator), - ], - 'identifiers': [ - (r'([a-zA-Z_$][\w$]*)', Name), - ], - 'builtins': [ - (words(( - 'ANYPTR', 'ANYREC', 'BOOLEAN', 'BYTE', 'CHAR', 'INTEGER', 'LONGINT', - 'REAL', 'SET', 'SHORTCHAR', 'SHORTINT', 'SHORTREAL' - ), suffix=r'\b'), Keyword.Type), - (words(( - 'ABS', 'ABSTRACT', 'ARRAY', 'ASH', 'ASSERT', 'BEGIN', 'BITS', 'BY', - 'CAP', 'CASE', 'CHR', 'CLOSE', 'CONST', 'DEC', 'DIV', 'DO', 'ELSE', - 'ELSIF', 'EMPTY', 'END', 'ENTIER', 'EXCL', 'EXIT', 'EXTENSIBLE', 'FOR', - 'HALT', 'IF', 'IMPORT', 'IN', 'INC', 'INCL', 'IS', 'LEN', 'LIMITED', - 'LONG', 'LOOP', 'MAX', 'MIN', 'MOD', 'MODULE', 'NEW', 'ODD', 'OF', - 'OR', 'ORD', 'OUT', 'POINTER', 'PROCEDURE', 'RECORD', 'REPEAT', 'RETURN', - 'SHORT', 'SHORTCHAR', 'SHORTINT', 'SIZE', 'THEN', 'TYPE', 'TO', 'UNTIL', - 'VAR', 'WHILE', 'WITH' - ), suffix=r'\b'), Keyword.Reserved), - (r'(TRUE|FALSE|NIL|INF)\b', Keyword.Constant), - ] - } +# -*- coding: utf-8 -*- +""" + pygments.lexers.oberon + ~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for Oberon family languages. + + :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, include, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +__all__ = ['ComponentPascalLexer'] + + +class ComponentPascalLexer(RegexLexer): + """ + For `Component Pascal `_ source code. + + .. versionadded:: 2.1 + """ + name = 'Component Pascal' + aliases = ['componentpascal', 'cp'] + filenames = ['*.cp', '*.cps'] + mimetypes = ['text/x-component-pascal'] + + flags = re.MULTILINE | re.DOTALL + + tokens = { + 'root': [ + include('whitespace'), + include('comments'), + include('punctuation'), + include('numliterals'), + include('strings'), + include('operators'), + include('builtins'), + include('identifiers'), + ], + 'whitespace': [ + (r'\n+', Text), # blank lines + (r'\s+', Text), # whitespace + ], + 'comments': [ + (r'\(\*([^$].*?)\*\)', Comment.Multiline), + # TODO: nested comments (* (* ... 
*) ... (* ... *) *) not supported! + ], + 'punctuation': [ + (r'[()\[\]{},.:;|]', Punctuation), + ], + 'numliterals': [ + (r'[0-9A-F]+X\b', Number.Hex), # char code + (r'[0-9A-F]+[HL]\b', Number.Hex), # hexadecimal number + (r'[0-9]+\.[0-9]+E[+-][0-9]+', Number.Float), # real number + (r'[0-9]+\.[0-9]+', Number.Float), # real number + (r'[0-9]+', Number.Integer), # decimal whole number + ], + 'strings': [ + (r"'[^\n']*'", String), # single quoted string + (r'"[^\n"]*"', String), # double quoted string + ], + 'operators': [ + # Arithmetic Operators + (r'[+-]', Operator), + (r'[*/]', Operator), + # Relational Operators + (r'[=#<>]', Operator), + # Dereferencing Operator + (r'\^', Operator), + # Logical AND Operator + (r'&', Operator), + # Logical NOT Operator + (r'~', Operator), + # Assignment Symbol + (r':=', Operator), + # Range Constructor + (r'\.\.', Operator), + (r'\$', Operator), + ], + 'identifiers': [ + (r'([a-zA-Z_$][\w$]*)', Name), + ], + 'builtins': [ + (words(( + 'ANYPTR', 'ANYREC', 'BOOLEAN', 'BYTE', 'CHAR', 'INTEGER', 'LONGINT', + 'REAL', 'SET', 'SHORTCHAR', 'SHORTINT', 'SHORTREAL' + ), suffix=r'\b'), Keyword.Type), + (words(( + 'ABS', 'ABSTRACT', 'ARRAY', 'ASH', 'ASSERT', 'BEGIN', 'BITS', 'BY', + 'CAP', 'CASE', 'CHR', 'CLOSE', 'CONST', 'DEC', 'DIV', 'DO', 'ELSE', + 'ELSIF', 'EMPTY', 'END', 'ENTIER', 'EXCL', 'EXIT', 'EXTENSIBLE', 'FOR', + 'HALT', 'IF', 'IMPORT', 'IN', 'INC', 'INCL', 'IS', 'LEN', 'LIMITED', + 'LONG', 'LOOP', 'MAX', 'MIN', 'MOD', 'MODULE', 'NEW', 'ODD', 'OF', + 'OR', 'ORD', 'OUT', 'POINTER', 'PROCEDURE', 'RECORD', 'REPEAT', 'RETURN', + 'SHORT', 'SHORTCHAR', 'SHORTINT', 'SIZE', 'THEN', 'TYPE', 'TO', 'UNTIL', + 'VAR', 'WHILE', 'WITH' + ), suffix=r'\b'), Keyword.Reserved), + (r'(TRUE|FALSE|NIL|INF)\b', Keyword.Constant), + ] + } diff --git a/pygments/lexers/objective.py b/pygments/lexers/objective.py old mode 100644 new mode 100755 index 777d8d4..b1649d8 --- a/pygments/lexers/objective.py +++ b/pygments/lexers/objective.py @@ -1,504 +1,504 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.objective - ~~~~~~~~~~~~~~~~~~~~~~~~~ - - Lexers for Objective-C family languages. - - :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -import re - -from pygments.lexer import RegexLexer, include, bygroups, using, this, words, \ - inherit, default -from pygments.token import Text, Keyword, Name, String, Operator, \ - Number, Punctuation, Literal, Comment - -from pygments.lexers.c_cpp import CLexer, CppLexer - -__all__ = ['ObjectiveCLexer', 'ObjectiveCppLexer', 'LogosLexer', 'SwiftLexer'] - - -def objective(baselexer): - """ - Generate a subclass of baselexer that accepts the Objective-C syntax - extensions. - """ - - # Have to be careful not to accidentally match JavaDoc/Doxygen syntax here, - # since that's quite common in ordinary C/C++ files. It's OK to match - # JavaDoc/Doxygen keywords that only apply to Objective-C, mind. - # - # The upshot of this is that we CANNOT match @class or @interface - _oc_keywords = re.compile(r'@(?:end|implementation|protocol)') - - # Matches [ ? identifier ( identifier ? ] | identifier? : ) - # (note the identifier is *optional* when there is a ':'!) - _oc_message = re.compile(r'\[\s*[a-zA-Z_]\w*\s+' - r'(?:[a-zA-Z_]\w*\s*\]|' - r'(?:[a-zA-Z_]\w*)?:)') - - class GeneratedObjectiveCVariant(baselexer): - """ - Implements Objective-C syntax on top of an existing C family lexer. 
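
The `objective()` factory above is the interesting part of this module: instead of copying the C and C++ grammars, it builds a subclass of whichever base lexer it is handed, and the special `inherit` marker splices the inherited rules in after the Objective-C ones. The same pattern works for any RegexLexer extension; a minimal sketch (the `@directive` keyword is invented for illustration):

    from pygments.lexer import inherit
    from pygments.lexers.c_cpp import CLexer
    from pygments.token import Keyword

    class TinyCVariantLexer(CLexer):
        # New rules are tried first; `inherit` then pulls in all of
        # CLexer's own 'statements' rules after them.
        tokens = {
            'statements': [
                (r'@directive\b', Keyword),  # hypothetical extension
                inherit,
            ],
        }
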
- """ - - tokens = { - 'statements': [ - (r'@"', String, 'string'), - (r'@(YES|NO)', Number), - (r"@'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char), - (r'@(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float), - (r'@(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), - (r'@0x[0-9a-fA-F]+[Ll]?', Number.Hex), - (r'@0[0-7]+[Ll]?', Number.Oct), - (r'@\d+[Ll]?', Number.Integer), - (r'@\(', Literal, 'literal_number'), - (r'@\[', Literal, 'literal_array'), - (r'@\{', Literal, 'literal_dictionary'), - (words(( - '@selector', '@private', '@protected', '@public', '@encode', - '@synchronized', '@try', '@throw', '@catch', '@finally', - '@end', '@property', '@synthesize', '__bridge', '__bridge_transfer', - '__autoreleasing', '__block', '__weak', '__strong', 'weak', 'strong', - 'copy', 'retain', 'assign', 'unsafe_unretained', 'atomic', 'nonatomic', - 'readonly', 'readwrite', 'setter', 'getter', 'typeof', 'in', - 'out', 'inout', 'release', 'class', '@dynamic', '@optional', - '@required', '@autoreleasepool', '@import'), suffix=r'\b'), - Keyword), - (words(('id', 'instancetype', 'Class', 'IMP', 'SEL', 'BOOL', - 'IBOutlet', 'IBAction', 'unichar'), suffix=r'\b'), - Keyword.Type), - (r'@(true|false|YES|NO)\n', Name.Builtin), - (r'(YES|NO|nil|self|super)\b', Name.Builtin), - # Carbon types - (r'(Boolean|UInt8|SInt8|UInt16|SInt16|UInt32|SInt32)\b', Keyword.Type), - # Carbon built-ins - (r'(TRUE|FALSE)\b', Name.Builtin), - (r'(@interface|@implementation)(\s+)', bygroups(Keyword, Text), - ('#pop', 'oc_classname')), - (r'(@class|@protocol)(\s+)', bygroups(Keyword, Text), - ('#pop', 'oc_forward_classname')), - # @ can also prefix other expressions like @{...} or @(...) - (r'@', Punctuation), - inherit, - ], - 'oc_classname': [ - # interface definition that inherits - (r'([a-zA-Z$_][\w$]*)(\s*:\s*)([a-zA-Z$_][\w$]*)?(\s*)(\{)', - bygroups(Name.Class, Text, Name.Class, Text, Punctuation), - ('#pop', 'oc_ivars')), - (r'([a-zA-Z$_][\w$]*)(\s*:\s*)([a-zA-Z$_][\w$]*)?', - bygroups(Name.Class, Text, Name.Class), '#pop'), - # interface definition for a category - (r'([a-zA-Z$_][\w$]*)(\s*)(\([a-zA-Z$_][\w$]*\))(\s*)(\{)', - bygroups(Name.Class, Text, Name.Label, Text, Punctuation), - ('#pop', 'oc_ivars')), - (r'([a-zA-Z$_][\w$]*)(\s*)(\([a-zA-Z$_][\w$]*\))', - bygroups(Name.Class, Text, Name.Label), '#pop'), - # simple interface / implementation - (r'([a-zA-Z$_][\w$]*)(\s*)(\{)', - bygroups(Name.Class, Text, Punctuation), ('#pop', 'oc_ivars')), - (r'([a-zA-Z$_][\w$]*)', Name.Class, '#pop') - ], - 'oc_forward_classname': [ - (r'([a-zA-Z$_][\w$]*)(\s*,\s*)', - bygroups(Name.Class, Text), 'oc_forward_classname'), - (r'([a-zA-Z$_][\w$]*)(\s*;?)', - bygroups(Name.Class, Text), '#pop') - ], - 'oc_ivars': [ - include('whitespace'), - include('statements'), - (';', Punctuation), - (r'\{', Punctuation, '#push'), - (r'\}', Punctuation, '#pop'), - ], - 'root': [ - # methods - (r'^([-+])(\s*)' # method marker - r'(\(.*?\))?(\s*)' # return type - r'([a-zA-Z$_][\w$]*:?)', # begin of method name - bygroups(Punctuation, Text, using(this), - Text, Name.Function), - 'method'), - inherit, - ], - 'method': [ - include('whitespace'), - # TODO unsure if ellipses are allowed elsewhere, see - # discussion in Issue 789 - (r',', Punctuation), - (r'\.\.\.', Punctuation), - (r'(\(.*?\))(\s*)([a-zA-Z$_][\w$]*)', - bygroups(using(this), Text, Name.Variable)), - (r'[a-zA-Z$_][\w$]*:', Name.Function), - (';', Punctuation, '#pop'), - (r'\{', Punctuation, 'function'), - default('#pop'), - ], - 'literal_number': [ - (r'\(', Punctuation, 
'literal_number_inner'), - (r'\)', Literal, '#pop'), - include('statement'), - ], - 'literal_number_inner': [ - (r'\(', Punctuation, '#push'), - (r'\)', Punctuation, '#pop'), - include('statement'), - ], - 'literal_array': [ - (r'\[', Punctuation, 'literal_array_inner'), - (r'\]', Literal, '#pop'), - include('statement'), - ], - 'literal_array_inner': [ - (r'\[', Punctuation, '#push'), - (r'\]', Punctuation, '#pop'), - include('statement'), - ], - 'literal_dictionary': [ - (r'\}', Literal, '#pop'), - include('statement'), - ], - } - - def analyse_text(text): - if _oc_keywords.search(text): - return 1.0 - elif '@"' in text: # strings - return 0.8 - elif re.search('@[0-9]+', text): - return 0.7 - elif _oc_message.search(text): - return 0.8 - return 0 - - def get_tokens_unprocessed(self, text): - from pygments.lexers._cocoa_builtins import COCOA_INTERFACES, \ - COCOA_PROTOCOLS, COCOA_PRIMITIVES - - for index, token, value in \ - baselexer.get_tokens_unprocessed(self, text): - if token is Name or token is Name.Class: - if value in COCOA_INTERFACES or value in COCOA_PROTOCOLS \ - or value in COCOA_PRIMITIVES: - token = Name.Builtin.Pseudo - - yield index, token, value - - return GeneratedObjectiveCVariant - - -class ObjectiveCLexer(objective(CLexer)): - """ - For Objective-C source code with preprocessor directives. - """ - - name = 'Objective-C' - aliases = ['objective-c', 'objectivec', 'obj-c', 'objc'] - filenames = ['*.m', '*.h'] - mimetypes = ['text/x-objective-c'] - priority = 0.05 # Lower than C - - -class ObjectiveCppLexer(objective(CppLexer)): - """ - For Objective-C++ source code with preprocessor directives. - """ - - name = 'Objective-C++' - aliases = ['objective-c++', 'objectivec++', 'obj-c++', 'objc++'] - filenames = ['*.mm', '*.hh'] - mimetypes = ['text/x-objective-c++'] - priority = 0.05 # Lower than C++ - - -class LogosLexer(ObjectiveCppLexer): - """ - For Logos + Objective-C source code with preprocessor directives. - - .. 
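
Because `*.h` is claimed by C, C++ and Objective-C alike, these lexers pair a lowered `priority` with the `analyse_text` heuristics above (`@interface` and `@class` are deliberately not matched, since Doxygen comments in plain C can contain them). Filename-based guessing therefore falls back on content; for example, assuming a standard install:

    from pygments.lexers import guess_lexer_for_filename

    header = '@implementation Foo\n@end\n'
    # Expected to pick Objective-C over C/C++, since _oc_keywords
    # matches '@implementation' and scores a full 1.0.
    print(guess_lexer_for_filename('Foo.h', header).name)
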
versionadded:: 1.6 - """ - - name = 'Logos' - aliases = ['logos'] - filenames = ['*.x', '*.xi', '*.xm', '*.xmi'] - mimetypes = ['text/x-logos'] - priority = 0.25 - - tokens = { - 'statements': [ - (r'(%orig|%log)\b', Keyword), - (r'(%c)\b(\()(\s*)([a-zA-Z$_][\w$]*)(\s*)(\))', - bygroups(Keyword, Punctuation, Text, Name.Class, Text, Punctuation)), - (r'(%init)\b(\()', - bygroups(Keyword, Punctuation), 'logos_init_directive'), - (r'(%init)(?=\s*;)', bygroups(Keyword)), - (r'(%hook|%group)(\s+)([a-zA-Z$_][\w$]+)', - bygroups(Keyword, Text, Name.Class), '#pop'), - (r'(%subclass)(\s+)', bygroups(Keyword, Text), - ('#pop', 'logos_classname')), - inherit, - ], - 'logos_init_directive': [ - (r'\s+', Text), - (',', Punctuation, ('logos_init_directive', '#pop')), - (r'([a-zA-Z$_][\w$]*)(\s*)(=)(\s*)([^);]*)', - bygroups(Name.Class, Text, Punctuation, Text, Text)), - (r'([a-zA-Z$_][\w$]*)', Name.Class), - (r'\)', Punctuation, '#pop'), - ], - 'logos_classname': [ - (r'([a-zA-Z$_][\w$]*)(\s*:\s*)([a-zA-Z$_][\w$]*)?', - bygroups(Name.Class, Text, Name.Class), '#pop'), - (r'([a-zA-Z$_][\w$]*)', Name.Class, '#pop') - ], - 'root': [ - (r'(%subclass)(\s+)', bygroups(Keyword, Text), - 'logos_classname'), - (r'(%hook|%group)(\s+)([a-zA-Z$_][\w$]+)', - bygroups(Keyword, Text, Name.Class)), - (r'(%config)(\s*\(\s*)(\w+)(\s*=\s*)(.*?)(\s*\)\s*)', - bygroups(Keyword, Text, Name.Variable, Text, String, Text)), - (r'(%ctor)(\s*)(\{)', bygroups(Keyword, Text, Punctuation), - 'function'), - (r'(%new)(\s*)(\()(\s*.*?\s*)(\))', - bygroups(Keyword, Text, Keyword, String, Keyword)), - (r'(\s*)(%end)(\s*)', bygroups(Text, Keyword, Text)), - inherit, - ], - } - - _logos_keywords = re.compile(r'%(?:hook|ctor|init|c\()') - - def analyse_text(text): - if LogosLexer._logos_keywords.search(text): - return 1.0 - return 0 - - -class SwiftLexer(RegexLexer): - """ - For `Swift `_ source. - - .. 
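
The Logos rules above lean heavily on `bygroups`, which assigns each capture group of one regex its own token type; that is how `%hook SBApplication` splits into a keyword, whitespace and a class name. A self-contained miniature of the idea (this toy `%hook`-only grammar is hypothetical):

    from pygments.lexer import RegexLexer, bygroups
    from pygments.token import Keyword, Name, Text

    class TinyHookLexer(RegexLexer):
        tokens = {
            'root': [
                (r'(%hook)(\s+)(\w+)', bygroups(Keyword, Text, Name.Class)),
                (r'\s+', Text),
            ],
        }

    for token, value in TinyHookLexer().get_tokens('%hook SBApplication'):
        print(token, repr(value))
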
versionadded:: 2.0 - """ - name = 'Swift' - filenames = ['*.swift'] - aliases = ['swift'] - mimetypes = ['text/x-swift'] - - tokens = { - 'root': [ - # Whitespace and Comments - (r'\n', Text), - (r'\s+', Text), - (r'//', Comment.Single, 'comment-single'), - (r'/\*', Comment.Multiline, 'comment-multi'), - (r'#(if|elseif|else|endif|available)\b', Comment.Preproc, 'preproc'), - - # Keywords - include('keywords'), - - # Global Types - (words(( - 'Array', 'AutoreleasingUnsafeMutablePointer', 'BidirectionalReverseView', - 'Bit', 'Bool', 'CFunctionPointer', 'COpaquePointer', 'CVaListPointer', - 'Character', 'ClosedInterval', 'CollectionOfOne', 'ContiguousArray', - 'Dictionary', 'DictionaryGenerator', 'DictionaryIndex', 'Double', - 'EmptyCollection', 'EmptyGenerator', 'EnumerateGenerator', - 'EnumerateSequence', 'FilterCollectionView', - 'FilterCollectionViewIndex', 'FilterGenerator', 'FilterSequenceView', - 'Float', 'Float80', 'FloatingPointClassification', 'GeneratorOf', - 'GeneratorOfOne', 'GeneratorSequence', 'HalfOpenInterval', 'HeapBuffer', - 'HeapBufferStorage', 'ImplicitlyUnwrappedOptional', 'IndexingGenerator', - 'Int', 'Int16', 'Int32', 'Int64', 'Int8', 'LazyBidirectionalCollection', - 'LazyForwardCollection', 'LazyRandomAccessCollection', - 'LazySequence', 'MapCollectionView', 'MapSequenceGenerator', - 'MapSequenceView', 'MirrorDisposition', 'ObjectIdentifier', 'OnHeap', - 'Optional', 'PermutationGenerator', 'QuickLookObject', - 'RandomAccessReverseView', 'Range', 'RangeGenerator', 'RawByte', 'Repeat', - 'ReverseBidirectionalIndex', 'ReverseRandomAccessIndex', 'SequenceOf', - 'SinkOf', 'Slice', 'StaticString', 'StrideThrough', 'StrideThroughGenerator', - 'StrideTo', 'StrideToGenerator', 'String', 'UInt', 'UInt16', 'UInt32', - 'UInt64', 'UInt8', 'UTF16', 'UTF32', 'UTF8', 'UnicodeDecodingResult', - 'UnicodeScalar', 'Unmanaged', 'UnsafeBufferPointer', - 'UnsafeBufferPointerGenerator', 'UnsafeMutableBufferPointer', - 'UnsafeMutablePointer', 'UnsafePointer', 'Zip2', 'ZipGenerator2', - # Protocols - 'AbsoluteValuable', 'AnyObject', 'ArrayLiteralConvertible', - 'BidirectionalIndexType', 'BitwiseOperationsType', - 'BooleanLiteralConvertible', 'BooleanType', 'CVarArgType', - 'CollectionType', 'Comparable', 'DebugPrintable', - 'DictionaryLiteralConvertible', 'Equatable', - 'ExtendedGraphemeClusterLiteralConvertible', - 'ExtensibleCollectionType', 'FloatLiteralConvertible', - 'FloatingPointType', 'ForwardIndexType', 'GeneratorType', 'Hashable', - 'IntegerArithmeticType', 'IntegerLiteralConvertible', 'IntegerType', - 'IntervalType', 'MirrorType', 'MutableCollectionType', 'MutableSliceable', - 'NilLiteralConvertible', 'OutputStreamType', 'Printable', - 'RandomAccessIndexType', 'RangeReplaceableCollectionType', - 'RawOptionSetType', 'RawRepresentable', 'Reflectable', 'SequenceType', - 'SignedIntegerType', 'SignedNumberType', 'SinkType', 'Sliceable', - 'Streamable', 'Strideable', 'StringInterpolationConvertible', - 'StringLiteralConvertible', 'UnicodeCodecType', - 'UnicodeScalarLiteralConvertible', 'UnsignedIntegerType', - '_ArrayBufferType', '_BidirectionalIndexType', '_CocoaStringType', - '_CollectionType', '_Comparable', '_ExtensibleCollectionType', - '_ForwardIndexType', '_Incrementable', '_IntegerArithmeticType', - '_IntegerType', '_ObjectiveCBridgeable', '_RandomAccessIndexType', - '_RawOptionSetType', '_SequenceType', '_Sequence_Type', - '_SignedIntegerType', '_SignedNumberType', '_Sliceable', '_Strideable', - '_SwiftNSArrayRequiredOverridesType', '_SwiftNSArrayType', - '_SwiftNSCopyingType', 
'_SwiftNSDictionaryRequiredOverridesType', - '_SwiftNSDictionaryType', '_SwiftNSEnumeratorType', - '_SwiftNSFastEnumerationType', '_SwiftNSStringRequiredOverridesType', - '_SwiftNSStringType', '_UnsignedIntegerType', - # Variables - 'C_ARGC', 'C_ARGV', 'Process', - # Typealiases - 'Any', 'AnyClass', 'BooleanLiteralType', 'CBool', 'CChar', 'CChar16', - 'CChar32', 'CDouble', 'CFloat', 'CInt', 'CLong', 'CLongLong', 'CShort', - 'CSignedChar', 'CUnsignedInt', 'CUnsignedLong', 'CUnsignedShort', - 'CWideChar', 'ExtendedGraphemeClusterType', 'Float32', 'Float64', - 'FloatLiteralType', 'IntMax', 'IntegerLiteralType', 'StringLiteralType', - 'UIntMax', 'UWord', 'UnicodeScalarType', 'Void', 'Word', - # Foundation/Cocoa - 'NSErrorPointer', 'NSObjectProtocol', 'Selector'), suffix=r'\b'), - Name.Builtin), - # Functions - (words(( - 'abs', 'advance', 'alignof', 'alignofValue', 'assert', 'assertionFailure', - 'contains', 'count', 'countElements', 'debugPrint', 'debugPrintln', - 'distance', 'dropFirst', 'dropLast', 'dump', 'enumerate', 'equal', - 'extend', 'fatalError', 'filter', 'find', 'first', 'getVaList', 'indices', - 'insert', 'isEmpty', 'join', 'last', 'lazy', 'lexicographicalCompare', - 'map', 'max', 'maxElement', 'min', 'minElement', 'numericCast', 'overlaps', - 'partition', 'precondition', 'preconditionFailure', 'prefix', 'print', - 'println', 'reduce', 'reflect', 'removeAll', 'removeAtIndex', 'removeLast', - 'removeRange', 'reverse', 'sizeof', 'sizeofValue', 'sort', 'sorted', - 'splice', 'split', 'startsWith', 'stride', 'strideof', 'strideofValue', - 'suffix', 'swap', 'toDebugString', 'toString', 'transcode', - 'underestimateCount', 'unsafeAddressOf', 'unsafeBitCast', 'unsafeDowncast', - 'withExtendedLifetime', 'withUnsafeMutablePointer', - 'withUnsafeMutablePointers', 'withUnsafePointer', 'withUnsafePointers', - 'withVaList'), suffix=r'\b'), - Name.Builtin.Pseudo), - - # Implicit Block Variables - (r'\$\d+', Name.Variable), - - # Binary Literal - (r'0b[01_]+', Number.Bin), - # Octal Literal - (r'0o[0-7_]+', Number.Oct), - # Hexadecimal Literal - (r'0x[0-9a-fA-F_]+', Number.Hex), - # Decimal Literal - (r'[0-9][0-9_]*(\.[0-9_]+[eE][+\-]?[0-9_]+|' - r'\.[0-9_]*|[eE][+\-]?[0-9_]+)', Number.Float), - (r'[0-9][0-9_]*', Number.Integer), - # String Literal - (r'"', String, 'string'), - - # Operators and Punctuation - (r'[(){}\[\].,:;=@#`?]|->|[<&?](?=\w)|(?<=\w)[>!?]', Punctuation), - (r'[/=\-+!*%<>&|^?~]+', Operator), - - # Identifier - (r'[a-zA-Z_]\w*', Name) - ], - 'keywords': [ - (words(( - 'as', 'break', 'case', 'catch', 'continue', 'default', 'defer', - 'do', 'else', 'fallthrough', 'for', 'guard', 'if', 'in', 'is', - 'repeat', 'return', '#selector', 'switch', 'throw', 'try', - 'where', 'while'), suffix=r'\b'), - Keyword), - (r'@availability\([^)]+\)', Keyword.Reserved), - (words(( - 'associativity', 'convenience', 'dynamic', 'didSet', 'final', - 'get', 'indirect', 'infix', 'inout', 'lazy', 'left', 'mutating', - 'none', 'nonmutating', 'optional', 'override', 'postfix', - 'precedence', 'prefix', 'Protocol', 'required', 'rethrows', - 'right', 'set', 'throws', 'Type', 'unowned', 'weak', 'willSet', - '@availability', '@autoclosure', '@noreturn', - '@NSApplicationMain', '@NSCopying', '@NSManaged', '@objc', - '@UIApplicationMain', '@IBAction', '@IBDesignable', - '@IBInspectable', '@IBOutlet'), suffix=r'\b'), - Keyword.Reserved), - (r'(as|dynamicType|false|is|nil|self|Self|super|true|__COLUMN__' - r'|__FILE__|__FUNCTION__|__LINE__|_' - r'|#(?:file|line|column|function))\b', Keyword.Constant), - 
(r'import\b', Keyword.Declaration, 'module'), - (r'(class|enum|extension|struct|protocol)(\s+)([a-zA-Z_]\w*)', - bygroups(Keyword.Declaration, Text, Name.Class)), - (r'(func)(\s+)([a-zA-Z_]\w*)', - bygroups(Keyword.Declaration, Text, Name.Function)), - (r'(var|let)(\s+)([a-zA-Z_]\w*)', bygroups(Keyword.Declaration, - Text, Name.Variable)), - (words(( - 'class', 'deinit', 'enum', 'extension', 'func', 'import', 'init', - 'internal', 'let', 'operator', 'private', 'protocol', 'public', - 'static', 'struct', 'subscript', 'typealias', 'var'), suffix=r'\b'), - Keyword.Declaration) - ], - 'comment': [ - (r':param: [a-zA-Z_]\w*|:returns?:|(FIXME|MARK|TODO):', - Comment.Special) - ], - - # Nested - 'comment-single': [ - (r'\n', Text, '#pop'), - include('comment'), - (r'[^\n]', Comment.Single) - ], - 'comment-multi': [ - include('comment'), - (r'[^*/]', Comment.Multiline), - (r'/\*', Comment.Multiline, '#push'), - (r'\*/', Comment.Multiline, '#pop'), - (r'[*/]', Comment.Multiline) - ], - 'module': [ - (r'\n', Text, '#pop'), - (r'[a-zA-Z_]\w*', Name.Class), - include('root') - ], - 'preproc': [ - (r'\n', Text, '#pop'), - include('keywords'), - (r'[A-Za-z]\w*', Comment.Preproc), - include('root') - ], - 'string': [ - (r'\\\(', String.Interpol, 'string-intp'), - (r'"', String, '#pop'), - (r"""\\['"\\nrt]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}""" - r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}""", String.Escape), - (r'[^\\"]+', String), - (r'\\', String) - ], - 'string-intp': [ - (r'\(', String.Interpol, '#push'), - (r'\)', String.Interpol, '#pop'), - include('root') - ] - } - - def get_tokens_unprocessed(self, text): - from pygments.lexers._cocoa_builtins import COCOA_INTERFACES, \ - COCOA_PROTOCOLS, COCOA_PRIMITIVES - - for index, token, value in \ - RegexLexer.get_tokens_unprocessed(self, text): - if token is Name or token is Name.Class: - if value in COCOA_INTERFACES or value in COCOA_PROTOCOLS \ - or value in COCOA_PRIMITIVES: - token = Name.Builtin.Pseudo - - yield index, token, value +# -*- coding: utf-8 -*- +""" + pygments.lexers.objective + ~~~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for Objective-C family languages. + + :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, include, bygroups, using, this, words, \ + inherit, default +from pygments.token import Text, Keyword, Name, String, Operator, \ + Number, Punctuation, Literal, Comment + +from pygments.lexers.c_cpp import CLexer, CppLexer + +__all__ = ['ObjectiveCLexer', 'ObjectiveCppLexer', 'LogosLexer', 'SwiftLexer'] + + +def objective(baselexer): + """ + Generate a subclass of baselexer that accepts the Objective-C syntax + extensions. + """ + + # Have to be careful not to accidentally match JavaDoc/Doxygen syntax here, + # since that's quite common in ordinary C/C++ files. It's OK to match + # JavaDoc/Doxygen keywords that only apply to Objective-C, mind. + # + # The upshot of this is that we CANNOT match @class or @interface + _oc_keywords = re.compile(r'@(?:end|implementation|protocol)') + + # Matches [ ? identifier ( identifier ? ] | identifier? : ) + # (note the identifier is *optional* when there is a ':'!) + _oc_message = re.compile(r'\[\s*[a-zA-Z_]\w*\s+' + r'(?:[a-zA-Z_]\w*\s*\]|' + r'(?:[a-zA-Z_]\w*)?:)') + + class GeneratedObjectiveCVariant(baselexer): + """ + Implements Objective-C syntax on top of an existing C family lexer. 
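
Swift's `\( ... )` interpolation is handled with a small stack machine: inside the `string` state, `\(` pushes `string-intp`, which includes `root` so that arbitrary expressions highlight normally (nested parentheses handled via `#push`) until the matching `)` pops back into the string. This is easy to observe directly, assuming a standard install:

    from pygments.lexers.objective import SwiftLexer

    code = 'let s = "sum = \\(1 + 2)"'
    for token, value in SwiftLexer().get_tokens(code):
        print(token, repr(value))
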
+ """ + + tokens = { + 'statements': [ + (r'@"', String, 'string'), + (r'@(YES|NO)', Number), + (r"@'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char), + (r'@(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float), + (r'@(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), + (r'@0x[0-9a-fA-F]+[Ll]?', Number.Hex), + (r'@0[0-7]+[Ll]?', Number.Oct), + (r'@\d+[Ll]?', Number.Integer), + (r'@\(', Literal, 'literal_number'), + (r'@\[', Literal, 'literal_array'), + (r'@\{', Literal, 'literal_dictionary'), + (words(( + '@selector', '@private', '@protected', '@public', '@encode', + '@synchronized', '@try', '@throw', '@catch', '@finally', + '@end', '@property', '@synthesize', '__bridge', '__bridge_transfer', + '__autoreleasing', '__block', '__weak', '__strong', 'weak', 'strong', + 'copy', 'retain', 'assign', 'unsafe_unretained', 'atomic', 'nonatomic', + 'readonly', 'readwrite', 'setter', 'getter', 'typeof', 'in', + 'out', 'inout', 'release', 'class', '@dynamic', '@optional', + '@required', '@autoreleasepool', '@import'), suffix=r'\b'), + Keyword), + (words(('id', 'instancetype', 'Class', 'IMP', 'SEL', 'BOOL', + 'IBOutlet', 'IBAction', 'unichar'), suffix=r'\b'), + Keyword.Type), + (r'@(true|false|YES|NO)\n', Name.Builtin), + (r'(YES|NO|nil|self|super)\b', Name.Builtin), + # Carbon types + (r'(Boolean|UInt8|SInt8|UInt16|SInt16|UInt32|SInt32)\b', Keyword.Type), + # Carbon built-ins + (r'(TRUE|FALSE)\b', Name.Builtin), + (r'(@interface|@implementation)(\s+)', bygroups(Keyword, Text), + ('#pop', 'oc_classname')), + (r'(@class|@protocol)(\s+)', bygroups(Keyword, Text), + ('#pop', 'oc_forward_classname')), + # @ can also prefix other expressions like @{...} or @(...) + (r'@', Punctuation), + inherit, + ], + 'oc_classname': [ + # interface definition that inherits + (r'([a-zA-Z$_][\w$]*)(\s*:\s*)([a-zA-Z$_][\w$]*)?(\s*)(\{)', + bygroups(Name.Class, Text, Name.Class, Text, Punctuation), + ('#pop', 'oc_ivars')), + (r'([a-zA-Z$_][\w$]*)(\s*:\s*)([a-zA-Z$_][\w$]*)?', + bygroups(Name.Class, Text, Name.Class), '#pop'), + # interface definition for a category + (r'([a-zA-Z$_][\w$]*)(\s*)(\([a-zA-Z$_][\w$]*\))(\s*)(\{)', + bygroups(Name.Class, Text, Name.Label, Text, Punctuation), + ('#pop', 'oc_ivars')), + (r'([a-zA-Z$_][\w$]*)(\s*)(\([a-zA-Z$_][\w$]*\))', + bygroups(Name.Class, Text, Name.Label), '#pop'), + # simple interface / implementation + (r'([a-zA-Z$_][\w$]*)(\s*)(\{)', + bygroups(Name.Class, Text, Punctuation), ('#pop', 'oc_ivars')), + (r'([a-zA-Z$_][\w$]*)', Name.Class, '#pop') + ], + 'oc_forward_classname': [ + (r'([a-zA-Z$_][\w$]*)(\s*,\s*)', + bygroups(Name.Class, Text), 'oc_forward_classname'), + (r'([a-zA-Z$_][\w$]*)(\s*;?)', + bygroups(Name.Class, Text), '#pop') + ], + 'oc_ivars': [ + include('whitespace'), + include('statements'), + (';', Punctuation), + (r'\{', Punctuation, '#push'), + (r'\}', Punctuation, '#pop'), + ], + 'root': [ + # methods + (r'^([-+])(\s*)' # method marker + r'(\(.*?\))?(\s*)' # return type + r'([a-zA-Z$_][\w$]*:?)', # begin of method name + bygroups(Punctuation, Text, using(this), + Text, Name.Function), + 'method'), + inherit, + ], + 'method': [ + include('whitespace'), + # TODO unsure if ellipses are allowed elsewhere, see + # discussion in Issue 789 + (r',', Punctuation), + (r'\.\.\.', Punctuation), + (r'(\(.*?\))(\s*)([a-zA-Z$_][\w$]*)', + bygroups(using(this), Text, Name.Variable)), + (r'[a-zA-Z$_][\w$]*:', Name.Function), + (';', Punctuation, '#pop'), + (r'\{', Punctuation, 'function'), + default('#pop'), + ], + 'literal_number': [ + (r'\(', Punctuation, 
'literal_number_inner'), + (r'\)', Literal, '#pop'), + include('statement'), + ], + 'literal_number_inner': [ + (r'\(', Punctuation, '#push'), + (r'\)', Punctuation, '#pop'), + include('statement'), + ], + 'literal_array': [ + (r'\[', Punctuation, 'literal_array_inner'), + (r'\]', Literal, '#pop'), + include('statement'), + ], + 'literal_array_inner': [ + (r'\[', Punctuation, '#push'), + (r'\]', Punctuation, '#pop'), + include('statement'), + ], + 'literal_dictionary': [ + (r'\}', Literal, '#pop'), + include('statement'), + ], + } + + def analyse_text(text): + if _oc_keywords.search(text): + return 1.0 + elif '@"' in text: # strings + return 0.8 + elif re.search('@[0-9]+', text): + return 0.7 + elif _oc_message.search(text): + return 0.8 + return 0 + + def get_tokens_unprocessed(self, text): + from pygments.lexers._cocoa_builtins import COCOA_INTERFACES, \ + COCOA_PROTOCOLS, COCOA_PRIMITIVES + + for index, token, value in \ + baselexer.get_tokens_unprocessed(self, text): + if token is Name or token is Name.Class: + if value in COCOA_INTERFACES or value in COCOA_PROTOCOLS \ + or value in COCOA_PRIMITIVES: + token = Name.Builtin.Pseudo + + yield index, token, value + + return GeneratedObjectiveCVariant + + +class ObjectiveCLexer(objective(CLexer)): + """ + For Objective-C source code with preprocessor directives. + """ + + name = 'Objective-C' + aliases = ['objective-c', 'objectivec', 'obj-c', 'objc'] + filenames = ['*.m', '*.h'] + mimetypes = ['text/x-objective-c'] + priority = 0.05 # Lower than C + + +class ObjectiveCppLexer(objective(CppLexer)): + """ + For Objective-C++ source code with preprocessor directives. + """ + + name = 'Objective-C++' + aliases = ['objective-c++', 'objectivec++', 'obj-c++', 'objc++'] + filenames = ['*.mm', '*.hh'] + mimetypes = ['text/x-objective-c++'] + priority = 0.05 # Lower than C++ + + +class LogosLexer(ObjectiveCppLexer): + """ + For Logos + Objective-C source code with preprocessor directives. + + .. 
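
Note how the Objective-C variants (and SwiftLexer below) avoid baking thousands of Cocoa names into the regexes: `get_tokens_unprocessed` is overridden to re-tag any plain Name found in the generated `_cocoa_builtins` tables as `Name.Builtin.Pseudo`. The same stream-filtering idiom can be reused for a custom vocabulary; a sketch with an invented name set:

    from pygments.lexers.c_cpp import CLexer
    from pygments.token import Name

    MY_TYPES = {'FooRef', 'BarRef'}  # hypothetical project-specific names

    class ProjectCLexer(CLexer):
        def get_tokens_unprocessed(self, text):
            for index, token, value in CLexer.get_tokens_unprocessed(self, text):
                if token is Name and value in MY_TYPES:
                    token = Name.Builtin.Pseudo  # re-tag, as above
                yield index, token, value
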
versionadded:: 1.6 + """ + + name = 'Logos' + aliases = ['logos'] + filenames = ['*.x', '*.xi', '*.xm', '*.xmi'] + mimetypes = ['text/x-logos'] + priority = 0.25 + + tokens = { + 'statements': [ + (r'(%orig|%log)\b', Keyword), + (r'(%c)\b(\()(\s*)([a-zA-Z$_][\w$]*)(\s*)(\))', + bygroups(Keyword, Punctuation, Text, Name.Class, Text, Punctuation)), + (r'(%init)\b(\()', + bygroups(Keyword, Punctuation), 'logos_init_directive'), + (r'(%init)(?=\s*;)', bygroups(Keyword)), + (r'(%hook|%group)(\s+)([a-zA-Z$_][\w$]+)', + bygroups(Keyword, Text, Name.Class), '#pop'), + (r'(%subclass)(\s+)', bygroups(Keyword, Text), + ('#pop', 'logos_classname')), + inherit, + ], + 'logos_init_directive': [ + (r'\s+', Text), + (',', Punctuation, ('logos_init_directive', '#pop')), + (r'([a-zA-Z$_][\w$]*)(\s*)(=)(\s*)([^);]*)', + bygroups(Name.Class, Text, Punctuation, Text, Text)), + (r'([a-zA-Z$_][\w$]*)', Name.Class), + (r'\)', Punctuation, '#pop'), + ], + 'logos_classname': [ + (r'([a-zA-Z$_][\w$]*)(\s*:\s*)([a-zA-Z$_][\w$]*)?', + bygroups(Name.Class, Text, Name.Class), '#pop'), + (r'([a-zA-Z$_][\w$]*)', Name.Class, '#pop') + ], + 'root': [ + (r'(%subclass)(\s+)', bygroups(Keyword, Text), + 'logos_classname'), + (r'(%hook|%group)(\s+)([a-zA-Z$_][\w$]+)', + bygroups(Keyword, Text, Name.Class)), + (r'(%config)(\s*\(\s*)(\w+)(\s*=\s*)(.*?)(\s*\)\s*)', + bygroups(Keyword, Text, Name.Variable, Text, String, Text)), + (r'(%ctor)(\s*)(\{)', bygroups(Keyword, Text, Punctuation), + 'function'), + (r'(%new)(\s*)(\()(\s*.*?\s*)(\))', + bygroups(Keyword, Text, Keyword, String, Keyword)), + (r'(\s*)(%end)(\s*)', bygroups(Text, Keyword, Text)), + inherit, + ], + } + + _logos_keywords = re.compile(r'%(?:hook|ctor|init|c\()') + + def analyse_text(text): + if LogosLexer._logos_keywords.search(text): + return 1.0 + return 0 + + +class SwiftLexer(RegexLexer): + """ + For `Swift `_ source. + + .. 
versionadded:: 2.0 + """ + name = 'Swift' + filenames = ['*.swift'] + aliases = ['swift'] + mimetypes = ['text/x-swift'] + + tokens = { + 'root': [ + # Whitespace and Comments + (r'\n', Text), + (r'\s+', Text), + (r'//', Comment.Single, 'comment-single'), + (r'/\*', Comment.Multiline, 'comment-multi'), + (r'#(if|elseif|else|endif|available)\b', Comment.Preproc, 'preproc'), + + # Keywords + include('keywords'), + + # Global Types + (words(( + 'Array', 'AutoreleasingUnsafeMutablePointer', 'BidirectionalReverseView', + 'Bit', 'Bool', 'CFunctionPointer', 'COpaquePointer', 'CVaListPointer', + 'Character', 'ClosedInterval', 'CollectionOfOne', 'ContiguousArray', + 'Dictionary', 'DictionaryGenerator', 'DictionaryIndex', 'Double', + 'EmptyCollection', 'EmptyGenerator', 'EnumerateGenerator', + 'EnumerateSequence', 'FilterCollectionView', + 'FilterCollectionViewIndex', 'FilterGenerator', 'FilterSequenceView', + 'Float', 'Float80', 'FloatingPointClassification', 'GeneratorOf', + 'GeneratorOfOne', 'GeneratorSequence', 'HalfOpenInterval', 'HeapBuffer', + 'HeapBufferStorage', 'ImplicitlyUnwrappedOptional', 'IndexingGenerator', + 'Int', 'Int16', 'Int32', 'Int64', 'Int8', 'LazyBidirectionalCollection', + 'LazyForwardCollection', 'LazyRandomAccessCollection', + 'LazySequence', 'MapCollectionView', 'MapSequenceGenerator', + 'MapSequenceView', 'MirrorDisposition', 'ObjectIdentifier', 'OnHeap', + 'Optional', 'PermutationGenerator', 'QuickLookObject', + 'RandomAccessReverseView', 'Range', 'RangeGenerator', 'RawByte', 'Repeat', + 'ReverseBidirectionalIndex', 'ReverseRandomAccessIndex', 'SequenceOf', + 'SinkOf', 'Slice', 'StaticString', 'StrideThrough', 'StrideThroughGenerator', + 'StrideTo', 'StrideToGenerator', 'String', 'UInt', 'UInt16', 'UInt32', + 'UInt64', 'UInt8', 'UTF16', 'UTF32', 'UTF8', 'UnicodeDecodingResult', + 'UnicodeScalar', 'Unmanaged', 'UnsafeBufferPointer', + 'UnsafeBufferPointerGenerator', 'UnsafeMutableBufferPointer', + 'UnsafeMutablePointer', 'UnsafePointer', 'Zip2', 'ZipGenerator2', + # Protocols + 'AbsoluteValuable', 'AnyObject', 'ArrayLiteralConvertible', + 'BidirectionalIndexType', 'BitwiseOperationsType', + 'BooleanLiteralConvertible', 'BooleanType', 'CVarArgType', + 'CollectionType', 'Comparable', 'DebugPrintable', + 'DictionaryLiteralConvertible', 'Equatable', + 'ExtendedGraphemeClusterLiteralConvertible', + 'ExtensibleCollectionType', 'FloatLiteralConvertible', + 'FloatingPointType', 'ForwardIndexType', 'GeneratorType', 'Hashable', + 'IntegerArithmeticType', 'IntegerLiteralConvertible', 'IntegerType', + 'IntervalType', 'MirrorType', 'MutableCollectionType', 'MutableSliceable', + 'NilLiteralConvertible', 'OutputStreamType', 'Printable', + 'RandomAccessIndexType', 'RangeReplaceableCollectionType', + 'RawOptionSetType', 'RawRepresentable', 'Reflectable', 'SequenceType', + 'SignedIntegerType', 'SignedNumberType', 'SinkType', 'Sliceable', + 'Streamable', 'Strideable', 'StringInterpolationConvertible', + 'StringLiteralConvertible', 'UnicodeCodecType', + 'UnicodeScalarLiteralConvertible', 'UnsignedIntegerType', + '_ArrayBufferType', '_BidirectionalIndexType', '_CocoaStringType', + '_CollectionType', '_Comparable', '_ExtensibleCollectionType', + '_ForwardIndexType', '_Incrementable', '_IntegerArithmeticType', + '_IntegerType', '_ObjectiveCBridgeable', '_RandomAccessIndexType', + '_RawOptionSetType', '_SequenceType', '_Sequence_Type', + '_SignedIntegerType', '_SignedNumberType', '_Sliceable', '_Strideable', + '_SwiftNSArrayRequiredOverridesType', '_SwiftNSArrayType', + '_SwiftNSCopyingType', 
'_SwiftNSDictionaryRequiredOverridesType', + '_SwiftNSDictionaryType', '_SwiftNSEnumeratorType', + '_SwiftNSFastEnumerationType', '_SwiftNSStringRequiredOverridesType', + '_SwiftNSStringType', '_UnsignedIntegerType', + # Variables + 'C_ARGC', 'C_ARGV', 'Process', + # Typealiases + 'Any', 'AnyClass', 'BooleanLiteralType', 'CBool', 'CChar', 'CChar16', + 'CChar32', 'CDouble', 'CFloat', 'CInt', 'CLong', 'CLongLong', 'CShort', + 'CSignedChar', 'CUnsignedInt', 'CUnsignedLong', 'CUnsignedShort', + 'CWideChar', 'ExtendedGraphemeClusterType', 'Float32', 'Float64', + 'FloatLiteralType', 'IntMax', 'IntegerLiteralType', 'StringLiteralType', + 'UIntMax', 'UWord', 'UnicodeScalarType', 'Void', 'Word', + # Foundation/Cocoa + 'NSErrorPointer', 'NSObjectProtocol', 'Selector'), suffix=r'\b'), + Name.Builtin), + # Functions + (words(( + 'abs', 'advance', 'alignof', 'alignofValue', 'assert', 'assertionFailure', + 'contains', 'count', 'countElements', 'debugPrint', 'debugPrintln', + 'distance', 'dropFirst', 'dropLast', 'dump', 'enumerate', 'equal', + 'extend', 'fatalError', 'filter', 'find', 'first', 'getVaList', 'indices', + 'insert', 'isEmpty', 'join', 'last', 'lazy', 'lexicographicalCompare', + 'map', 'max', 'maxElement', 'min', 'minElement', 'numericCast', 'overlaps', + 'partition', 'precondition', 'preconditionFailure', 'prefix', 'print', + 'println', 'reduce', 'reflect', 'removeAll', 'removeAtIndex', 'removeLast', + 'removeRange', 'reverse', 'sizeof', 'sizeofValue', 'sort', 'sorted', + 'splice', 'split', 'startsWith', 'stride', 'strideof', 'strideofValue', + 'suffix', 'swap', 'toDebugString', 'toString', 'transcode', + 'underestimateCount', 'unsafeAddressOf', 'unsafeBitCast', 'unsafeDowncast', + 'withExtendedLifetime', 'withUnsafeMutablePointer', + 'withUnsafeMutablePointers', 'withUnsafePointer', 'withUnsafePointers', + 'withVaList'), suffix=r'\b'), + Name.Builtin.Pseudo), + + # Implicit Block Variables + (r'\$\d+', Name.Variable), + + # Binary Literal + (r'0b[01_]+', Number.Bin), + # Octal Literal + (r'0o[0-7_]+', Number.Oct), + # Hexadecimal Literal + (r'0x[0-9a-fA-F_]+', Number.Hex), + # Decimal Literal + (r'[0-9][0-9_]*(\.[0-9_]+[eE][+\-]?[0-9_]+|' + r'\.[0-9_]*|[eE][+\-]?[0-9_]+)', Number.Float), + (r'[0-9][0-9_]*', Number.Integer), + # String Literal + (r'"', String, 'string'), + + # Operators and Punctuation + (r'[(){}\[\].,:;=@#`?]|->|[<&?](?=\w)|(?<=\w)[>!?]', Punctuation), + (r'[/=\-+!*%<>&|^?~]+', Operator), + + # Identifier + (r'[a-zA-Z_]\w*', Name) + ], + 'keywords': [ + (words(( + 'as', 'break', 'case', 'catch', 'continue', 'default', 'defer', + 'do', 'else', 'fallthrough', 'for', 'guard', 'if', 'in', 'is', + 'repeat', 'return', '#selector', 'switch', 'throw', 'try', + 'where', 'while'), suffix=r'\b'), + Keyword), + (r'@availability\([^)]+\)', Keyword.Reserved), + (words(( + 'associativity', 'convenience', 'dynamic', 'didSet', 'final', + 'get', 'indirect', 'infix', 'inout', 'lazy', 'left', 'mutating', + 'none', 'nonmutating', 'optional', 'override', 'postfix', + 'precedence', 'prefix', 'Protocol', 'required', 'rethrows', + 'right', 'set', 'throws', 'Type', 'unowned', 'weak', 'willSet', + '@availability', '@autoclosure', '@noreturn', + '@NSApplicationMain', '@NSCopying', '@NSManaged', '@objc', + '@UIApplicationMain', '@IBAction', '@IBDesignable', + '@IBInspectable', '@IBOutlet'), suffix=r'\b'), + Keyword.Reserved), + (r'(as|dynamicType|false|is|nil|self|Self|super|true|__COLUMN__' + r'|__FILE__|__FUNCTION__|__LINE__|_' + r'|#(?:file|line|column|function))\b', Keyword.Constant), + 
(r'import\b', Keyword.Declaration, 'module'), + (r'(class|enum|extension|struct|protocol)(\s+)([a-zA-Z_]\w*)', + bygroups(Keyword.Declaration, Text, Name.Class)), + (r'(func)(\s+)([a-zA-Z_]\w*)', + bygroups(Keyword.Declaration, Text, Name.Function)), + (r'(var|let)(\s+)([a-zA-Z_]\w*)', bygroups(Keyword.Declaration, + Text, Name.Variable)), + (words(( + 'class', 'deinit', 'enum', 'extension', 'func', 'import', 'init', + 'internal', 'let', 'operator', 'private', 'protocol', 'public', + 'static', 'struct', 'subscript', 'typealias', 'var'), suffix=r'\b'), + Keyword.Declaration) + ], + 'comment': [ + (r':param: [a-zA-Z_]\w*|:returns?:|(FIXME|MARK|TODO):', + Comment.Special) + ], + + # Nested + 'comment-single': [ + (r'\n', Text, '#pop'), + include('comment'), + (r'[^\n]', Comment.Single) + ], + 'comment-multi': [ + include('comment'), + (r'[^*/]', Comment.Multiline), + (r'/\*', Comment.Multiline, '#push'), + (r'\*/', Comment.Multiline, '#pop'), + (r'[*/]', Comment.Multiline) + ], + 'module': [ + (r'\n', Text, '#pop'), + (r'[a-zA-Z_]\w*', Name.Class), + include('root') + ], + 'preproc': [ + (r'\n', Text, '#pop'), + include('keywords'), + (r'[A-Za-z]\w*', Comment.Preproc), + include('root') + ], + 'string': [ + (r'\\\(', String.Interpol, 'string-intp'), + (r'"', String, '#pop'), + (r"""\\['"\\nrt]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}""" + r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}""", String.Escape), + (r'[^\\"]+', String), + (r'\\', String) + ], + 'string-intp': [ + (r'\(', String.Interpol, '#push'), + (r'\)', String.Interpol, '#pop'), + include('root') + ] + } + + def get_tokens_unprocessed(self, text): + from pygments.lexers._cocoa_builtins import COCOA_INTERFACES, \ + COCOA_PROTOCOLS, COCOA_PRIMITIVES + + for index, token, value in \ + RegexLexer.get_tokens_unprocessed(self, text): + if token is Name or token is Name.Class: + if value in COCOA_INTERFACES or value in COCOA_PROTOCOLS \ + or value in COCOA_PRIMITIVES: + token = Name.Builtin.Pseudo + + yield index, token, value diff --git a/pygments/lexers/ooc.py b/pygments/lexers/ooc.py old mode 100644 new mode 100755 index 438719c..f9e8cd0 --- a/pygments/lexers/ooc.py +++ b/pygments/lexers/ooc.py @@ -1,85 +1,85 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.ooc - ~~~~~~~~~~~~~~~~~~~ - - Lexers for the Ooc language. - - :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -from pygments.lexer import RegexLexer, bygroups, words -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ - Number, Punctuation - -__all__ = ['OocLexer'] - - -class OocLexer(RegexLexer): - """ - For `Ooc `_ source code - - .. 
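
For completeness, the usual end-to-end path through any of the lexers in this patch, assuming a standard Pygments install:

    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    from pygments.lexers import get_lexer_for_filename

    lexer = get_lexer_for_filename('Main.swift')  # resolves via filenames = ['*.swift']
    print(highlight('let x = 42\n', lexer, TerminalFormatter()))
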
versionadded:: 1.2 - """ - name = 'Ooc' - aliases = ['ooc'] - filenames = ['*.ooc'] - mimetypes = ['text/x-ooc'] - - tokens = { - 'root': [ - (words(( - 'class', 'interface', 'implement', 'abstract', 'extends', 'from', - 'this', 'super', 'new', 'const', 'final', 'static', 'import', - 'use', 'extern', 'inline', 'proto', 'break', 'continue', - 'fallthrough', 'operator', 'if', 'else', 'for', 'while', 'do', - 'switch', 'case', 'as', 'in', 'version', 'return', 'true', - 'false', 'null'), prefix=r'\b', suffix=r'\b'), - Keyword), - (r'include\b', Keyword, 'include'), - (r'(cover)([ \t]+)(from)([ \t]+)(\w+[*@]?)', - bygroups(Keyword, Text, Keyword, Text, Name.Class)), - (r'(func)((?:[ \t]|\\\n)+)(~[a-z_]\w*)', - bygroups(Keyword, Text, Name.Function)), - (r'\bfunc\b', Keyword), - # Note: %= and ^= not listed on http://ooc-lang.org/syntax - (r'//.*', Comment), - (r'(?s)/\*.*?\*/', Comment.Multiline), - (r'(==?|\+=?|-[=>]?|\*=?|/=?|:=|!=?|%=?|\?|>{1,3}=?|<{1,3}=?|\.\.|' - r'&&?|\|\|?|\^=?)', Operator), - (r'(\.)([ \t]*)([a-z]\w*)', bygroups(Operator, Text, - Name.Function)), - (r'[A-Z][A-Z0-9_]+', Name.Constant), - (r'[A-Z]\w*([@*]|\[[ \t]*\])?', Name.Class), - - (r'([a-z]\w*(?:~[a-z]\w*)?)((?:[ \t]|\\\n)*)(?=\()', - bygroups(Name.Function, Text)), - (r'[a-z]\w*', Name.Variable), - - # : introduces types - (r'[:(){}\[\];,]', Punctuation), - - (r'0x[0-9a-fA-F]+', Number.Hex), - (r'0c[0-9]+', Number.Oct), - (r'0b[01]+', Number.Bin), - (r'[0-9_]\.[0-9_]*(?!\.)', Number.Float), - (r'[0-9_]+', Number.Decimal), - - (r'"(?:\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\"])*"', - String.Double), - (r"'(?:\\.|\\[0-9]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", - String.Char), - (r'@', Punctuation), # pointer dereference - (r'\.', Punctuation), # imports or chain operator - - (r'\\[ \t\n]', Text), - (r'[ \t]+', Text), - ], - 'include': [ - (r'[\w/]+', Name), - (r',', Punctuation), - (r'[ \t]', Text), - (r'[;\n]', Text, '#pop'), - ], - } +# -*- coding: utf-8 -*- +""" + pygments.lexers.ooc + ~~~~~~~~~~~~~~~~~~~ + + Lexers for the Ooc language. + + :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer, bygroups, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +__all__ = ['OocLexer'] + + +class OocLexer(RegexLexer): + """ + For `Ooc `_ source code + + .. 
versionadded:: 1.2 + """ + name = 'Ooc' + aliases = ['ooc'] + filenames = ['*.ooc'] + mimetypes = ['text/x-ooc'] + + tokens = { + 'root': [ + (words(( + 'class', 'interface', 'implement', 'abstract', 'extends', 'from', + 'this', 'super', 'new', 'const', 'final', 'static', 'import', + 'use', 'extern', 'inline', 'proto', 'break', 'continue', + 'fallthrough', 'operator', 'if', 'else', 'for', 'while', 'do', + 'switch', 'case', 'as', 'in', 'version', 'return', 'true', + 'false', 'null'), prefix=r'\b', suffix=r'\b'), + Keyword), + (r'include\b', Keyword, 'include'), + (r'(cover)([ \t]+)(from)([ \t]+)(\w+[*@]?)', + bygroups(Keyword, Text, Keyword, Text, Name.Class)), + (r'(func)((?:[ \t]|\\\n)+)(~[a-z_]\w*)', + bygroups(Keyword, Text, Name.Function)), + (r'\bfunc\b', Keyword), + # Note: %= and ^= not listed on http://ooc-lang.org/syntax + (r'//.*', Comment), + (r'(?s)/\*.*?\*/', Comment.Multiline), + (r'(==?|\+=?|-[=>]?|\*=?|/=?|:=|!=?|%=?|\?|>{1,3}=?|<{1,3}=?|\.\.|' + r'&&?|\|\|?|\^=?)', Operator), + (r'(\.)([ \t]*)([a-z]\w*)', bygroups(Operator, Text, + Name.Function)), + (r'[A-Z][A-Z0-9_]+', Name.Constant), + (r'[A-Z]\w*([@*]|\[[ \t]*\])?', Name.Class), + + (r'([a-z]\w*(?:~[a-z]\w*)?)((?:[ \t]|\\\n)*)(?=\()', + bygroups(Name.Function, Text)), + (r'[a-z]\w*', Name.Variable), + + # : introduces types + (r'[:(){}\[\];,]', Punctuation), + + (r'0x[0-9a-fA-F]+', Number.Hex), + (r'0c[0-9]+', Number.Oct), + (r'0b[01]+', Number.Bin), + (r'[0-9_]\.[0-9_]*(?!\.)', Number.Float), + (r'[0-9_]+', Number.Decimal), + + (r'"(?:\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\"])*"', + String.Double), + (r"'(?:\\.|\\[0-9]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", + String.Char), + (r'@', Punctuation), # pointer dereference + (r'\.', Punctuation), # imports or chain operator + + (r'\\[ \t\n]', Text), + (r'[ \t]+', Text), + ], + 'include': [ + (r'[\w/]+', Name), + (r',', Punctuation), + (r'[ \t]', Text), + (r'[;\n]', Text, '#pop'), + ], + } diff --git a/pygments/lexers/other.py b/pygments/lexers/other.py old mode 100644 new mode 100755 index c3a60ce..65b2885 --- a/pygments/lexers/other.py +++ b/pygments/lexers/other.py @@ -1,41 +1,41 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.other - ~~~~~~~~~~~~~~~~~~~~~ - - Just export lexer classes previously contained in this module. - - :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. 
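
`pygments/lexers/other.py` deserves a note: it defines no lexers of its own any more, only re-exports kept so that older import paths keep working, and its empty `__all__` hides those names from wildcard imports. Legacy code such as the following still runs:

    # Old import path, resolved through the compatibility shim above:
    from pygments.lexers.other import BashLexer as LegacyBashLexer
    # Canonical modern path:
    from pygments.lexers.shell import BashLexer

    assert LegacyBashLexer is BashLexer
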
-""" - -from pygments.lexers.sql import SqlLexer, MySqlLexer, SqliteConsoleLexer -from pygments.lexers.shell import BashLexer, BashSessionLexer, BatchLexer, \ - TcshLexer -from pygments.lexers.robotframework import RobotFrameworkLexer -from pygments.lexers.testing import GherkinLexer -from pygments.lexers.esoteric import BrainfuckLexer, BefungeLexer, RedcodeLexer -from pygments.lexers.prolog import LogtalkLexer -from pygments.lexers.snobol import SnobolLexer -from pygments.lexers.rebol import RebolLexer -from pygments.lexers.configs import KconfigLexer, Cfengine3Lexer -from pygments.lexers.modeling import ModelicaLexer -from pygments.lexers.scripting import AppleScriptLexer, MOOCodeLexer, \ - HybrisLexer -from pygments.lexers.graphics import PostScriptLexer, GnuplotLexer, \ - AsymptoteLexer, PovrayLexer -from pygments.lexers.business import ABAPLexer, OpenEdgeLexer, \ - GoodDataCLLexer, MaqlLexer -from pygments.lexers.automation import AutoItLexer, AutohotkeyLexer -from pygments.lexers.dsls import ProtoBufLexer, BroLexer, PuppetLexer, \ - MscgenLexer, VGLLexer -from pygments.lexers.basic import CbmBasicV2Lexer -from pygments.lexers.pawn import SourcePawnLexer, PawnLexer -from pygments.lexers.ecl import ECLLexer -from pygments.lexers.urbi import UrbiscriptLexer -from pygments.lexers.smalltalk import SmalltalkLexer, NewspeakLexer -from pygments.lexers.installers import NSISLexer, RPMSpecLexer -from pygments.lexers.textedit import AwkLexer -from pygments.lexers.smv import NuSMVLexer - -__all__ = [] +# -*- coding: utf-8 -*- +""" + pygments.lexers.other + ~~~~~~~~~~~~~~~~~~~~~ + + Just export lexer classes previously contained in this module. + + :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +from pygments.lexers.sql import SqlLexer, MySqlLexer, SqliteConsoleLexer +from pygments.lexers.shell import BashLexer, BashSessionLexer, BatchLexer, \ + TcshLexer +from pygments.lexers.robotframework import RobotFrameworkLexer +from pygments.lexers.testing import GherkinLexer +from pygments.lexers.esoteric import BrainfuckLexer, BefungeLexer, RedcodeLexer +from pygments.lexers.prolog import LogtalkLexer +from pygments.lexers.snobol import SnobolLexer +from pygments.lexers.rebol import RebolLexer +from pygments.lexers.configs import KconfigLexer, Cfengine3Lexer +from pygments.lexers.modeling import ModelicaLexer +from pygments.lexers.scripting import AppleScriptLexer, MOOCodeLexer, \ + HybrisLexer +from pygments.lexers.graphics import PostScriptLexer, GnuplotLexer, \ + AsymptoteLexer, PovrayLexer +from pygments.lexers.business import ABAPLexer, OpenEdgeLexer, \ + GoodDataCLLexer, MaqlLexer +from pygments.lexers.automation import AutoItLexer, AutohotkeyLexer +from pygments.lexers.dsls import ProtoBufLexer, BroLexer, PuppetLexer, \ + MscgenLexer, VGLLexer +from pygments.lexers.basic import CbmBasicV2Lexer +from pygments.lexers.pawn import SourcePawnLexer, PawnLexer +from pygments.lexers.ecl import ECLLexer +from pygments.lexers.urbi import UrbiscriptLexer +from pygments.lexers.smalltalk import SmalltalkLexer, NewspeakLexer +from pygments.lexers.installers import NSISLexer, RPMSpecLexer +from pygments.lexers.textedit import AwkLexer +from pygments.lexers.smv import NuSMVLexer + +__all__ = [] diff --git a/pygments/lexers/parasail.py b/pygments/lexers/parasail.py old mode 100644 new mode 100755 index 7f8cf07..ea31003 --- a/pygments/lexers/parasail.py +++ b/pygments/lexers/parasail.py @@ -1,79 +1,79 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.parasail - ~~~~~~~~~~~~~~~~~~~~~~~~ - - Lexer for ParaSail. - - :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -import re - -from pygments.lexer import RegexLexer, include -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ - Number, Punctuation, Literal - -__all__ = ['ParaSailLexer'] - - -class ParaSailLexer(RegexLexer): - """ - For `ParaSail `_ source code. - - .. 
versionadded:: 2.1 - """ - - name = 'ParaSail' - aliases = ['parasail'] - filenames = ['*.psi', '*.psl'] - mimetypes = ['text/x-parasail'] - - flags = re.MULTILINE - - tokens = { - 'root': [ - (r'[^\S\n]+', Text), - (r'//.*?\n', Comment.Single), - (r'\b(and|or|xor)=', Operator.Word), - (r'\b(and(\s+then)?|or(\s+else)?|xor|rem|mod|' - r'(is|not)\s+null)\b', - Operator.Word), - # Keywords - (r'\b(abs|abstract|all|block|class|concurrent|const|continue|' - r'each|end|exit|extends|exports|forward|func|global|implements|' - r'import|in|interface|is|lambda|locked|new|not|null|of|op|' - r'optional|private|queued|ref|return|reverse|separate|some|' - r'type|until|var|with|' - # Control flow - r'if|then|else|elsif|case|for|while|loop)\b', - Keyword.Reserved), - (r'(abstract\s+)?(interface|class|op|func|type)', - Keyword.Declaration), - # Literals - (r'"[^"]*"', String), - (r'\\[\'ntrf"0]', String.Escape), - (r'#[a-zA-Z]\w*', Literal), # Enumeration - include('numbers'), - (r"'[^']'", String.Char), - (r'[a-zA-Z]\w*', Name), - # Operators and Punctuation - (r'(<==|==>|<=>|\*\*=|<\|=|<<=|>>=|==|!=|=\?|<=|>=|' - r'\*\*|<<|>>|=>|:=|\+=|-=|\*=|\|=|\||/=|\+|-|\*|/|' - r'\.\.|<\.\.|\.\.<|<\.\.<)', - Operator), - (r'(<|>|\[|\]|\(|\)|\||:|;|,|.|\{|\}|->)', - Punctuation), - (r'\n+', Text), - ], - 'numbers': [ - (r'\d[0-9_]*#[0-9a-fA-F][0-9a-fA-F_]*#', Number.Hex), # any base - (r'0[xX][0-9a-fA-F][0-9a-fA-F_]*', Number.Hex), # C-like hex - (r'0[bB][01][01_]*', Number.Bin), # C-like bin - (r'\d[0-9_]*\.\d[0-9_]*[eE][+-]\d[0-9_]*', # float exp - Number.Float), - (r'\d[0-9_]*\.\d[0-9_]*', Number.Float), # float - (r'\d[0-9_]*', Number.Integer), # integer - ], - } +# -*- coding: utf-8 -*- +""" + pygments.lexers.parasail + ~~~~~~~~~~~~~~~~~~~~~~~~ + + Lexer for ParaSail. + + :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, include +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Literal + +__all__ = ['ParaSailLexer'] + + +class ParaSailLexer(RegexLexer): + """ + For `ParaSail `_ source code. + + .. 
versionadded:: 2.1 + """ + + name = 'ParaSail' + aliases = ['parasail'] + filenames = ['*.psi', '*.psl'] + mimetypes = ['text/x-parasail'] + + flags = re.MULTILINE + + tokens = { + 'root': [ + (r'[^\S\n]+', Text), + (r'//.*?\n', Comment.Single), + (r'\b(and|or|xor)=', Operator.Word), + (r'\b(and(\s+then)?|or(\s+else)?|xor|rem|mod|' + r'(is|not)\s+null)\b', + Operator.Word), + # Keywords + (r'\b(abs|abstract|all|block|class|concurrent|const|continue|' + r'each|end|exit|extends|exports|forward|func|global|implements|' + r'import|in|interface|is|lambda|locked|new|not|null|of|op|' + r'optional|private|queued|ref|return|reverse|separate|some|' + r'type|until|var|with|' + # Control flow + r'if|then|else|elsif|case|for|while|loop)\b', + Keyword.Reserved), + (r'(abstract\s+)?(interface|class|op|func|type)', + Keyword.Declaration), + # Literals + (r'"[^"]*"', String), + (r'\\[\'ntrf"0]', String.Escape), + (r'#[a-zA-Z]\w*', Literal), # Enumeration + include('numbers'), + (r"'[^']'", String.Char), + (r'[a-zA-Z]\w*', Name), + # Operators and Punctuation + (r'(<==|==>|<=>|\*\*=|<\|=|<<=|>>=|==|!=|=\?|<=|>=|' + r'\*\*|<<|>>|=>|:=|\+=|-=|\*=|\|=|\||/=|\+|-|\*|/|' + r'\.\.|<\.\.|\.\.<|<\.\.<)', + Operator), + (r'(<|>|\[|\]|\(|\)|\||:|;|,|.|\{|\}|->)', + Punctuation), + (r'\n+', Text), + ], + 'numbers': [ + (r'\d[0-9_]*#[0-9a-fA-F][0-9a-fA-F_]*#', Number.Hex), # any base + (r'0[xX][0-9a-fA-F][0-9a-fA-F_]*', Number.Hex), # C-like hex + (r'0[bB][01][01_]*', Number.Bin), # C-like bin + (r'\d[0-9_]*\.\d[0-9_]*[eE][+-]\d[0-9_]*', # float exp + Number.Float), + (r'\d[0-9_]*\.\d[0-9_]*', Number.Float), # float + (r'\d[0-9_]*', Number.Integer), # integer + ], + } diff --git a/pygments/lexers/parsers.py b/pygments/lexers/parsers.py old mode 100644 new mode 100755 index 8bcbfc5..bb29087 --- a/pygments/lexers/parsers.py +++ b/pygments/lexers/parsers.py @@ -1,835 +1,800 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.parsers - ~~~~~~~~~~~~~~~~~~~~~~~ - - Lexers for parser generators. - - :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -import re - -from pygments.lexer import RegexLexer, DelegatingLexer, \ - include, bygroups, using -from pygments.token import Punctuation, Other, Text, Comment, Operator, \ - Keyword, Name, String, Number, Whitespace -from pygments.lexers.jvm import JavaLexer -from pygments.lexers.c_cpp import CLexer, CppLexer -from pygments.lexers.objective import ObjectiveCLexer -from pygments.lexers.d import DLexer -from pygments.lexers.dotnet import CSharpLexer -from pygments.lexers.ruby import RubyLexer -from pygments.lexers.python import PythonLexer -from pygments.lexers.perl import PerlLexer - -__all__ = ['RagelLexer', 'RagelEmbeddedLexer', 'RagelCLexer', 'RagelDLexer', - 'RagelCppLexer', 'RagelObjectiveCLexer', 'RagelRubyLexer', - 'RagelJavaLexer', 'AntlrLexer', 'AntlrPythonLexer', - 'AntlrPerlLexer', 'AntlrRubyLexer', 'AntlrCppLexer', - # 'AntlrCLexer', - 'AntlrCSharpLexer', 'AntlrObjectiveCLexer', - 'AntlrJavaLexer', 'AntlrActionScriptLexer', - 'TreetopLexer', 'EbnfLexer'] - - -class RagelLexer(RegexLexer): - """ - A pure `Ragel `_ lexer. Use this for - fragments of Ragel. For ``.rl`` files, use RagelEmbeddedLexer instead - (or one of the language-specific subclasses). - - .. 
versionadded:: 1.1 - """ - - name = 'Ragel' - aliases = ['ragel'] - filenames = [] - - tokens = { - 'whitespace': [ - (r'\s+', Whitespace) - ], - 'comments': [ - (r'\#.*$', Comment), - ], - 'keywords': [ - (r'(access|action|alphtype)\b', Keyword), - (r'(getkey|write|machine|include)\b', Keyword), - (r'(any|ascii|extend|alpha|digit|alnum|lower|upper)\b', Keyword), - (r'(xdigit|cntrl|graph|print|punct|space|zlen|empty)\b', Keyword) - ], - 'numbers': [ - (r'0x[0-9A-Fa-f]+', Number.Hex), - (r'[+-]?[0-9]+', Number.Integer), - ], - 'literals': [ - (r'"(\\\\|\\"|[^"])*"', String), # double quote string - (r"'(\\\\|\\'|[^'])*'", String), # single quote string - (r'\[(\\\\|\\\]|[^\]])*\]', String), # square bracket literals - (r'/(?!\*)(\\\\|\\/|[^/])*/', String.Regex), # regular expressions - ], - 'identifiers': [ - (r'[a-zA-Z_]\w*', Name.Variable), - ], - 'operators': [ - (r',', Operator), # Join - (r'\||&|--?', Operator), # Union, Intersection and Subtraction - (r'\.|<:|:>>?', Operator), # Concatention - (r':', Operator), # Label - (r'->', Operator), # Epsilon Transition - (r'(>|\$|%|<|@|<>)(/|eof\b)', Operator), # EOF Actions - (r'(>|\$|%|<|@|<>)(!|err\b)', Operator), # Global Error Actions - (r'(>|\$|%|<|@|<>)(\^|lerr\b)', Operator), # Local Error Actions - (r'(>|\$|%|<|@|<>)(~|to\b)', Operator), # To-State Actions - (r'(>|\$|%|<|@|<>)(\*|from\b)', Operator), # From-State Actions - (r'>|@|\$|%', Operator), # Transition Actions and Priorities - (r'\*|\?|\+|\{[0-9]*,[0-9]*\}', Operator), # Repetition - (r'!|\^', Operator), # Negation - (r'\(|\)', Operator), # Grouping - ], - 'root': [ - include('literals'), - include('whitespace'), - include('comments'), - include('keywords'), - include('numbers'), - include('identifiers'), - include('operators'), - (r'\{', Punctuation, 'host'), - (r'=', Operator), - (r';', Punctuation), - ], - 'host': [ - (r'(' + r'|'.join(( # keep host code in largest possible chunks - r'[^{}\'"/#]+', # exclude unsafe characters - r'[^\\]\\[{}]', # allow escaped { or } - - # strings and comments may safely contain unsafe characters - r'"(\\\\|\\"|[^"])*"', # double quote string - r"'(\\\\|\\'|[^'])*'", # single quote string - r'//.*$\n?', # single line comment - r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment - r'\#.*$\n?', # ruby comment - - # regular expression: There's no reason for it to start - # with a * and this stops confusion with comments. - r'/(?!\*)(\\\\|\\/|[^/])*/', - - # / is safe now that we've handled regex and javadoc comments - r'/', - )) + r')+', Other), - - (r'\{', Punctuation, '#push'), - (r'\}', Punctuation, '#pop'), - ], - } - - -class RagelEmbeddedLexer(RegexLexer): - """ - A lexer for `Ragel`_ embedded in a host language file. - - This will only highlight Ragel statements. If you want host language - highlighting then call the language-specific Ragel lexer. - - .. 
versionadded:: 1.1 - """ - - name = 'Embedded Ragel' - aliases = ['ragel-em'] - filenames = ['*.rl'] - - tokens = { - 'root': [ - (r'(' + r'|'.join(( # keep host code in largest possible chunks - r'[^%\'"/#]+', # exclude unsafe characters - r'%(?=[^%]|$)', # a single % sign is okay, just not 2 of them - - # strings and comments may safely contain unsafe characters - r'"(\\\\|\\"|[^"])*"', # double quote string - r"'(\\\\|\\'|[^'])*'", # single quote string - r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment - r'//.*$\n?', # single line comment - r'\#.*$\n?', # ruby/ragel comment - r'/(?!\*)(\\\\|\\/|[^/])*/', # regular expression - - # / is safe now that we've handled regex and javadoc comments - r'/', - )) + r')+', Other), - - # Single Line FSM. - # Please don't put a quoted newline in a single line FSM. - # That's just mean. It will break this. - (r'(%%)(?![{%])(.*)($|;)(\n?)', bygroups(Punctuation, - using(RagelLexer), - Punctuation, Text)), - - # Multi Line FSM. - (r'(%%%%|%%)\{', Punctuation, 'multi-line-fsm'), - ], - 'multi-line-fsm': [ - (r'(' + r'|'.join(( # keep ragel code in largest possible chunks. - r'(' + r'|'.join(( - r'[^}\'"\[/#]', # exclude unsafe characters - r'\}(?=[^%]|$)', # } is okay as long as it's not followed by % - r'\}%(?=[^%]|$)', # ...well, one %'s okay, just not two... - r'[^\\]\\[{}]', # ...and } is okay if it's escaped - - # allow / if it's preceded with one of these symbols - # (ragel EOF actions) - r'(>|\$|%|<|@|<>)/', - - # specifically allow regex followed immediately by * - # so it doesn't get mistaken for a comment - r'/(?!\*)(\\\\|\\/|[^/])*/\*', - - # allow / as long as it's not followed by another / or by a * - r'/(?=[^/*]|$)', - - # We want to match as many of these as we can in one block. - # Not sure if we need the + sign here, - # does it help performance? - )) + r')+', - - # strings and comments may safely contain unsafe characters - r'"(\\\\|\\"|[^"])*"', # double quote string - r"'(\\\\|\\'|[^'])*'", # single quote string - r"\[(\\\\|\\\]|[^\]])*\]", # square bracket literal - r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment - r'//.*$\n?', # single line comment - r'\#.*$\n?', # ruby/ragel comment - )) + r')+', using(RagelLexer)), - - (r'\}%%', Punctuation, '#pop'), - ] - } - - def analyse_text(text): - return '@LANG: indep' in text - - -class RagelRubyLexer(DelegatingLexer): - """ - A lexer for `Ragel`_ in a Ruby host file. - - .. versionadded:: 1.1 - """ - - name = 'Ragel in Ruby Host' - aliases = ['ragel-ruby', 'ragel-rb'] - filenames = ['*.rl'] - - def __init__(self, **options): - super(RagelRubyLexer, self).__init__(RubyLexer, RagelEmbeddedLexer, - **options) - - def analyse_text(text): - return '@LANG: ruby' in text - - -class RagelCLexer(DelegatingLexer): - """ - A lexer for `Ragel`_ in a C host file. - - .. versionadded:: 1.1 - """ - - name = 'Ragel in C Host' - aliases = ['ragel-c'] - filenames = ['*.rl'] - - def __init__(self, **options): - super(RagelCLexer, self).__init__(CLexer, RagelEmbeddedLexer, - **options) - - def analyse_text(text): - return '@LANG: c' in text - - -class RagelDLexer(DelegatingLexer): - """ - A lexer for `Ragel`_ in a D host file. - - .. versionadded:: 1.1 - """ - - name = 'Ragel in D Host' - aliases = ['ragel-d'] - filenames = ['*.rl'] - - def __init__(self, **options): - super(RagelDLexer, self).__init__(DLexer, RagelEmbeddedLexer, **options) - - def analyse_text(text): - return '@LANG: d' in text - - -class RagelCppLexer(DelegatingLexer): - """ - A lexer for `Ragel`_ in a CPP host file. - - .. 
versionadded:: 1.1 - """ - - name = 'Ragel in CPP Host' - aliases = ['ragel-cpp'] - filenames = ['*.rl'] - - def __init__(self, **options): - super(RagelCppLexer, self).__init__(CppLexer, RagelEmbeddedLexer, **options) - - def analyse_text(text): - return '@LANG: c++' in text - - -class RagelObjectiveCLexer(DelegatingLexer): - """ - A lexer for `Ragel`_ in an Objective C host file. - - .. versionadded:: 1.1 - """ - - name = 'Ragel in Objective C Host' - aliases = ['ragel-objc'] - filenames = ['*.rl'] - - def __init__(self, **options): - super(RagelObjectiveCLexer, self).__init__(ObjectiveCLexer, - RagelEmbeddedLexer, - **options) - - def analyse_text(text): - return '@LANG: objc' in text - - -class RagelJavaLexer(DelegatingLexer): - """ - A lexer for `Ragel`_ in a Java host file. - - .. versionadded:: 1.1 - """ - - name = 'Ragel in Java Host' - aliases = ['ragel-java'] - filenames = ['*.rl'] - - def __init__(self, **options): - super(RagelJavaLexer, self).__init__(JavaLexer, RagelEmbeddedLexer, - **options) - - def analyse_text(text): - return '@LANG: java' in text - - -class AntlrLexer(RegexLexer): - """ - Generic `ANTLR`_ Lexer. - Should not be called directly, instead - use DelegatingLexer for your target language. - - .. versionadded:: 1.1 - - .. _ANTLR: http://www.antlr.org/ - """ - - name = 'ANTLR' - aliases = ['antlr'] - filenames = [] - - _id = r'[A-Za-z]\w*' - _TOKEN_REF = r'[A-Z]\w*' - _RULE_REF = r'[a-z]\w*' - _STRING_LITERAL = r'\'(?:\\\\|\\\'|[^\']*)\'' - _INT = r'[0-9]+' - - tokens = { - 'whitespace': [ - (r'\s+', Whitespace), - ], - 'comments': [ - (r'//.*$', Comment), - (r'/\*(.|\n)*?\*/', Comment), - ], - 'root': [ - include('whitespace'), - include('comments'), - - (r'(lexer|parser|tree)?(\s*)(grammar\b)(\s*)(' + _id + ')(;)', - bygroups(Keyword, Whitespace, Keyword, Whitespace, Name.Class, - Punctuation)), - # optionsSpec - (r'options\b', Keyword, 'options'), - # tokensSpec - (r'tokens\b', Keyword, 'tokens'), - # attrScope - (r'(scope)(\s*)(' + _id + r')(\s*)(\{)', - bygroups(Keyword, Whitespace, Name.Variable, Whitespace, - Punctuation), 'action'), - # exception - (r'(catch|finally)\b', Keyword, 'exception'), - # action - (r'(@' + _id + r')(\s*)(::)?(\s*)(' + _id + r')(\s*)(\{)', - bygroups(Name.Label, Whitespace, Punctuation, Whitespace, - Name.Label, Whitespace, Punctuation), 'action'), - # rule - (r'((?:protected|private|public|fragment)\b)?(\s*)(' + _id + ')(!)?', - bygroups(Keyword, Whitespace, Name.Label, Punctuation), - ('rule-alts', 'rule-prelims')), - ], - 'exception': [ - (r'\n', Whitespace, '#pop'), - (r'\s', Whitespace), - include('comments'), - - (r'\[', Punctuation, 'nested-arg-action'), - (r'\{', Punctuation, 'action'), - ], - 'rule-prelims': [ - include('whitespace'), - include('comments'), - - (r'returns\b', Keyword), - (r'\[', Punctuation, 'nested-arg-action'), - (r'\{', Punctuation, 'action'), - # throwsSpec - (r'(throws)(\s+)(' + _id + ')', - bygroups(Keyword, Whitespace, Name.Label)), - (r'(,)(\s*)(' + _id + ')', - bygroups(Punctuation, Whitespace, Name.Label)), # Additional throws - # optionsSpec - (r'options\b', Keyword, 'options'), - # ruleScopeSpec - scope followed by target language code or name of action - # TODO finish implementing other possibilities for scope - # L173 ANTLRv3.g from ANTLR book - (r'(scope)(\s+)(\{)', bygroups(Keyword, Whitespace, Punctuation), - 'action'), - (r'(scope)(\s+)(' + _id + r')(\s*)(;)', - bygroups(Keyword, Whitespace, Name.Label, Whitespace, Punctuation)), - # ruleAction - (r'(@' + _id + r')(\s*)(\{)', - 
bygroups(Name.Label, Whitespace, Punctuation), 'action'), - # finished prelims, go to rule alts! - (r':', Punctuation, '#pop') - ], - 'rule-alts': [ - include('whitespace'), - include('comments'), - - # These might need to go in a separate 'block' state triggered by ( - (r'options\b', Keyword, 'options'), - (r':', Punctuation), - - # literals - (r"'(\\\\|\\'|[^'])*'", String), - (r'"(\\\\|\\"|[^"])*"', String), - (r'<<([^>]|>[^>])>>', String), - # identifiers - # Tokens start with capital letter. - (r'\$?[A-Z_]\w*', Name.Constant), - # Rules start with small letter. - (r'\$?[a-z_]\w*', Name.Variable), - # operators - (r'(\+|\||->|=>|=|\(|\)|\.\.|\.|\?|\*|\^|!|\#|~)', Operator), - (r',', Punctuation), - (r'\[', Punctuation, 'nested-arg-action'), - (r'\{', Punctuation, 'action'), - (r';', Punctuation, '#pop') - ], - 'tokens': [ - include('whitespace'), - include('comments'), - (r'\{', Punctuation), - (r'(' + _TOKEN_REF + r')(\s*)(=)?(\s*)(' + _STRING_LITERAL - + r')?(\s*)(;)', - bygroups(Name.Label, Whitespace, Punctuation, Whitespace, - String, Whitespace, Punctuation)), - (r'\}', Punctuation, '#pop'), - ], - 'options': [ - include('whitespace'), - include('comments'), - (r'\{', Punctuation), - (r'(' + _id + r')(\s*)(=)(\s*)(' + - '|'.join((_id, _STRING_LITERAL, _INT, r'\*')) + r')(\s*)(;)', - bygroups(Name.Variable, Whitespace, Punctuation, Whitespace, - Text, Whitespace, Punctuation)), - (r'\}', Punctuation, '#pop'), - ], - 'action': [ - (r'(' + r'|'.join(( # keep host code in largest possible chunks - r'[^${}\'"/\\]+', # exclude unsafe characters - - # strings and comments may safely contain unsafe characters - r'"(\\\\|\\"|[^"])*"', # double quote string - r"'(\\\\|\\'|[^'])*'", # single quote string - r'//.*$\n?', # single line comment - r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment - - # regular expression: There's no reason for it to start - # with a * and this stops confusion with comments. - r'/(?!\*)(\\\\|\\/|[^/])*/', - - # backslashes are okay, as long as we are not backslashing a % - r'\\(?!%)', - - # Now that we've handled regex and javadoc comments - # it's safe to let / through. - r'/', - )) + r')+', Other), - (r'(\\)(%)', bygroups(Punctuation, Other)), - (r'(\$[a-zA-Z]+)(\.?)(text|value)?', - bygroups(Name.Variable, Punctuation, Name.Property)), - (r'\{', Punctuation, '#push'), - (r'\}', Punctuation, '#pop'), - ], - 'nested-arg-action': [ - (r'(' + r'|'.join(( # keep host code in largest possible chunks. - r'[^$\[\]\'"/]+', # exclude unsafe characters - - # strings and comments may safely contain unsafe characters - r'"(\\\\|\\"|[^"])*"', # double quote string - r"'(\\\\|\\'|[^'])*'", # single quote string - r'//.*$\n?', # single line comment - r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment - - # regular expression: There's no reason for it to start - # with a * and this stops confusion with comments. - r'/(?!\*)(\\\\|\\/|[^/])*/', - - # Now that we've handled regex and javadoc comments - # it's safe to let / through. - r'/', - )) + r')+', Other), - - - (r'\[', Punctuation, '#push'), - (r'\]', Punctuation, '#pop'), - (r'(\$[a-zA-Z]+)(\.?)(text|value)?', - bygroups(Name.Variable, Punctuation, Name.Property)), - (r'(\\\\|\\\]|\\\[|[^\[\]])+', Other), - ] - } - - def analyse_text(text): - return re.search(r'^\s*grammar\s+[a-zA-Z0-9]+\s*;', text, re.M) - -# http://www.antlr.org/wiki/display/ANTLR3/Code+Generation+Targets - -# TH: I'm not aware of any language features of C++ that will cause -# incorrect lexing of C files. 
Antlr doesn't appear to make a distinction, -# so just assume they're C++. No idea how to make Objective C work in the -# future. - -# class AntlrCLexer(DelegatingLexer): -# """ -# ANTLR with C Target -# -# .. versionadded:: 1.1 -# """ -# -# name = 'ANTLR With C Target' -# aliases = ['antlr-c'] -# filenames = ['*.G', '*.g'] -# -# def __init__(self, **options): -# super(AntlrCLexer, self).__init__(CLexer, AntlrLexer, **options) -# -# def analyse_text(text): -# return re.match(r'^\s*language\s*=\s*C\s*;', text) - - -class AntlrCppLexer(DelegatingLexer): - """ - `ANTLR`_ with CPP Target - - .. versionadded:: 1.1 - """ - - name = 'ANTLR With CPP Target' - aliases = ['antlr-cpp'] - filenames = ['*.G', '*.g'] - - def __init__(self, **options): - super(AntlrCppLexer, self).__init__(CppLexer, AntlrLexer, **options) - - def analyse_text(text): - return AntlrLexer.analyse_text(text) and \ - re.search(r'^\s*language\s*=\s*C\s*;', text, re.M) - - -class AntlrObjectiveCLexer(DelegatingLexer): - """ - `ANTLR`_ with Objective-C Target - - .. versionadded:: 1.1 - """ - - name = 'ANTLR With ObjectiveC Target' - aliases = ['antlr-objc'] - filenames = ['*.G', '*.g'] - - def __init__(self, **options): - super(AntlrObjectiveCLexer, self).__init__(ObjectiveCLexer, - AntlrLexer, **options) - - def analyse_text(text): - return AntlrLexer.analyse_text(text) and \ - re.search(r'^\s*language\s*=\s*ObjC\s*;', text) - - -class AntlrCSharpLexer(DelegatingLexer): - """ - `ANTLR`_ with C# Target - - .. versionadded:: 1.1 - """ - - name = 'ANTLR With C# Target' - aliases = ['antlr-csharp', 'antlr-c#'] - filenames = ['*.G', '*.g'] - - def __init__(self, **options): - super(AntlrCSharpLexer, self).__init__(CSharpLexer, AntlrLexer, - **options) - - def analyse_text(text): - return AntlrLexer.analyse_text(text) and \ - re.search(r'^\s*language\s*=\s*CSharp2\s*;', text, re.M) - - -class AntlrPythonLexer(DelegatingLexer): - """ - `ANTLR`_ with Python Target - - .. versionadded:: 1.1 - """ - - name = 'ANTLR With Python Target' - aliases = ['antlr-python'] - filenames = ['*.G', '*.g'] - - def __init__(self, **options): - super(AntlrPythonLexer, self).__init__(PythonLexer, AntlrLexer, - **options) - - def analyse_text(text): - return AntlrLexer.analyse_text(text) and \ - re.search(r'^\s*language\s*=\s*Python\s*;', text, re.M) - - -class AntlrJavaLexer(DelegatingLexer): - """ - `ANTLR`_ with Java Target - - .. versionadded:: 1. - """ - - name = 'ANTLR With Java Target' - aliases = ['antlr-java'] - filenames = ['*.G', '*.g'] - - def __init__(self, **options): - super(AntlrJavaLexer, self).__init__(JavaLexer, AntlrLexer, - **options) - - def analyse_text(text): - # Antlr language is Java by default - return AntlrLexer.analyse_text(text) and 0.9 - - -class AntlrRubyLexer(DelegatingLexer): - """ - `ANTLR`_ with Ruby Target - - .. versionadded:: 1.1 - """ - - name = 'ANTLR With Ruby Target' - aliases = ['antlr-ruby', 'antlr-rb'] - filenames = ['*.G', '*.g'] - - def __init__(self, **options): - super(AntlrRubyLexer, self).__init__(RubyLexer, AntlrLexer, - **options) - - def analyse_text(text): - return AntlrLexer.analyse_text(text) and \ - re.search(r'^\s*language\s*=\s*Ruby\s*;', text, re.M) - - -class AntlrPerlLexer(DelegatingLexer): - """ - `ANTLR`_ with Perl Target - - .. 
versionadded:: 1.1 - """ - - name = 'ANTLR With Perl Target' - aliases = ['antlr-perl'] - filenames = ['*.G', '*.g'] - - def __init__(self, **options): - super(AntlrPerlLexer, self).__init__(PerlLexer, AntlrLexer, - **options) - - def analyse_text(text): - return AntlrLexer.analyse_text(text) and \ - re.search(r'^\s*language\s*=\s*Perl5\s*;', text, re.M) - - -class AntlrActionScriptLexer(DelegatingLexer): - """ - `ANTLR`_ with ActionScript Target - - .. versionadded:: 1.1 - """ - - name = 'ANTLR With ActionScript Target' - aliases = ['antlr-as', 'antlr-actionscript'] - filenames = ['*.G', '*.g'] - - def __init__(self, **options): - from pygments.lexers.actionscript import ActionScriptLexer - super(AntlrActionScriptLexer, self).__init__(ActionScriptLexer, - AntlrLexer, **options) - - def analyse_text(text): - return AntlrLexer.analyse_text(text) and \ - re.search(r'^\s*language\s*=\s*ActionScript\s*;', text, re.M) - - -class TreetopBaseLexer(RegexLexer): - """ - A base lexer for `Treetop `_ grammars. - Not for direct use; use TreetopLexer instead. - - .. versionadded:: 1.6 - """ - - tokens = { - 'root': [ - include('space'), - (r'require[ \t]+[^\n\r]+[\n\r]', Other), - (r'module\b', Keyword.Namespace, 'module'), - (r'grammar\b', Keyword, 'grammar'), - ], - 'module': [ - include('space'), - include('end'), - (r'module\b', Keyword, '#push'), - (r'grammar\b', Keyword, 'grammar'), - (r'[A-Z]\w*(?:::[A-Z]\w*)*', Name.Namespace), - ], - 'grammar': [ - include('space'), - include('end'), - (r'rule\b', Keyword, 'rule'), - (r'include\b', Keyword, 'include'), - (r'[A-Z]\w*', Name), - ], - 'include': [ - include('space'), - (r'[A-Z]\w*(?:::[A-Z]\w*)*', Name.Class, '#pop'), - ], - 'rule': [ - include('space'), - include('end'), - (r'"(\\\\|\\"|[^"])*"', String.Double), - (r"'(\\\\|\\'|[^'])*'", String.Single), - (r'([A-Za-z_]\w*)(:)', bygroups(Name.Label, Punctuation)), - (r'[A-Za-z_]\w*', Name), - (r'[()]', Punctuation), - (r'[?+*/&!~]', Operator), - (r'\[(?:\\.|\[:\^?[a-z]+:\]|[^\\\]])+\]', String.Regex), - (r'([0-9]*)(\.\.)([0-9]*)', - bygroups(Number.Integer, Operator, Number.Integer)), - (r'(<)([^>]+)(>)', bygroups(Punctuation, Name.Class, Punctuation)), - (r'\{', Punctuation, 'inline_module'), - (r'\.', String.Regex), - ], - 'inline_module': [ - (r'\{', Other, 'ruby'), - (r'\}', Punctuation, '#pop'), - (r'[^{}]+', Other), - ], - 'ruby': [ - (r'\{', Other, '#push'), - (r'\}', Other, '#pop'), - (r'[^{}]+', Other), - ], - 'space': [ - (r'[ \t\n\r]+', Whitespace), - (r'#[^\n]*', Comment.Single), - ], - 'end': [ - (r'end\b', Keyword, '#pop'), - ], - } - - -class TreetopLexer(DelegatingLexer): - """ - A lexer for `Treetop `_ grammars. - - .. versionadded:: 1.6 - """ - - name = 'Treetop' - aliases = ['treetop'] - filenames = ['*.treetop', '*.tt'] - - def __init__(self, **options): - super(TreetopLexer, self).__init__(RubyLexer, TreetopBaseLexer, **options) - - -class EbnfLexer(RegexLexer): - """ - Lexer for `ISO/IEC 14977 EBNF - `_ - grammars. - - .. 
versionadded:: 2.0 - """ - - name = 'EBNF' - aliases = ['ebnf'] - filenames = ['*.ebnf'] - mimetypes = ['text/x-ebnf'] - - tokens = { - 'root': [ - include('whitespace'), - include('comment_start'), - include('identifier'), - (r'=', Operator, 'production'), - ], - 'production': [ - include('whitespace'), - include('comment_start'), - include('identifier'), - (r'"[^"]*"', String.Double), - (r"'[^']*'", String.Single), - (r'(\?[^?]*\?)', Name.Entity), - (r'[\[\]{}(),|]', Punctuation), - (r'-', Operator), - (r';', Punctuation, '#pop'), - (r'\.', Punctuation, '#pop'), - ], - 'whitespace': [ - (r'\s+', Text), - ], - 'comment_start': [ - (r'\(\*', Comment.Multiline, 'comment'), - ], - 'comment': [ - (r'[^*)]', Comment.Multiline), - include('comment_start'), - (r'\*\)', Comment.Multiline, '#pop'), - (r'[*)]', Comment.Multiline), - ], - 'identifier': [ - (r'([a-zA-Z][\w \-]*)', Keyword), - ], - } +# -*- coding: utf-8 -*- +""" + pygments.lexers.parsers + ~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for parser generators. + + :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, DelegatingLexer, \ + include, bygroups, using +from pygments.token import Punctuation, Other, Text, Comment, Operator, \ + Keyword, Name, String, Number, Whitespace +from pygments.lexers.jvm import JavaLexer +from pygments.lexers.c_cpp import CLexer, CppLexer +from pygments.lexers.objective import ObjectiveCLexer +from pygments.lexers.d import DLexer +from pygments.lexers.dotnet import CSharpLexer +from pygments.lexers.ruby import RubyLexer +from pygments.lexers.python import PythonLexer +from pygments.lexers.perl import PerlLexer + +__all__ = ['RagelLexer', 'RagelEmbeddedLexer', 'RagelCLexer', 'RagelDLexer', + 'RagelCppLexer', 'RagelObjectiveCLexer', 'RagelRubyLexer', + 'RagelJavaLexer', 'AntlrLexer', 'AntlrPythonLexer', + 'AntlrPerlLexer', 'AntlrRubyLexer', 'AntlrCppLexer', + 'AntlrCSharpLexer', 'AntlrObjectiveCLexer', + 'AntlrJavaLexer', 'AntlrActionScriptLexer', + 'TreetopLexer', 'EbnfLexer'] + + +class RagelLexer(RegexLexer): + """ + A pure `Ragel `_ lexer. Use this for + fragments of Ragel. For ``.rl`` files, use RagelEmbeddedLexer instead + (or one of the language-specific subclasses). + + .. 
versionadded:: 1.1
+    """
+
+    name = 'Ragel'
+    aliases = ['ragel']
+    filenames = []
+
+    tokens = {
+        'whitespace': [
+            (r'\s+', Whitespace)
+        ],
+        'comments': [
+            (r'\#.*$', Comment),
+        ],
+        'keywords': [
+            (r'(access|action|alphtype)\b', Keyword),
+            (r'(getkey|write|machine|include)\b', Keyword),
+            (r'(any|ascii|extend|alpha|digit|alnum|lower|upper)\b', Keyword),
+            (r'(xdigit|cntrl|graph|print|punct|space|zlen|empty)\b', Keyword)
+        ],
+        'numbers': [
+            (r'0x[0-9A-Fa-f]+', Number.Hex),
+            (r'[+-]?[0-9]+', Number.Integer),
+        ],
+        'literals': [
+            (r'"(\\\\|\\"|[^"])*"', String),  # double quote string
+            (r"'(\\\\|\\'|[^'])*'", String),  # single quote string
+            (r'\[(\\\\|\\\]|[^\]])*\]', String),  # square bracket literals
+            (r'/(?!\*)(\\\\|\\/|[^/])*/', String.Regex),  # regular expressions
+        ],
+        'identifiers': [
+            (r'[a-zA-Z_]\w*', Name.Variable),
+        ],
+        'operators': [
+            (r',', Operator),  # Join
+            (r'\||&|--?', Operator),  # Union, Intersection and Subtraction
+            (r'\.|<:|:>>?', Operator),  # Concatenation
+            (r':', Operator),  # Label
+            (r'->', Operator),  # Epsilon Transition
+            (r'(>|\$|%|<|@|<>)(/|eof\b)', Operator),  # EOF Actions
+            (r'(>|\$|%|<|@|<>)(!|err\b)', Operator),  # Global Error Actions
+            (r'(>|\$|%|<|@|<>)(\^|lerr\b)', Operator),  # Local Error Actions
+            (r'(>|\$|%|<|@|<>)(~|to\b)', Operator),  # To-State Actions
+            (r'(>|\$|%|<|@|<>)(\*|from\b)', Operator),  # From-State Actions
+            (r'>|@|\$|%', Operator),  # Transition Actions and Priorities
+            (r'\*|\?|\+|\{[0-9]*,[0-9]*\}', Operator),  # Repetition
+            (r'!|\^', Operator),  # Negation
+            (r'\(|\)', Operator),  # Grouping
+        ],
+        'root': [
+            include('literals'),
+            include('whitespace'),
+            include('comments'),
+            include('keywords'),
+            include('numbers'),
+            include('identifiers'),
+            include('operators'),
+            (r'\{', Punctuation, 'host'),
+            (r'=', Operator),
+            (r';', Punctuation),
+        ],
+        'host': [
+            (r'(' + r'|'.join((  # keep host code in largest possible chunks
+                r'[^{}\'"/#]+',  # exclude unsafe characters
+                r'[^\\]\\[{}]',  # allow escaped { or }
+
+                # strings and comments may safely contain unsafe characters
+                r'"(\\\\|\\"|[^"])*"',  # double quote string
+                r"'(\\\\|\\'|[^'])*'",  # single quote string
+                r'//.*$\n?',  # single line comment
+                r'/\*(.|\n)*?\*/',  # multi-line javadoc-style comment
+                r'\#.*$\n?',  # ruby comment
+
+                # regular expression: There's no reason for it to start
+                # with a * and this stops confusion with comments.
+                r'/(?!\*)(\\\\|\\/|[^/])*/',
+
+                # / is safe now that we've handled regex and javadoc comments
+                r'/',
+            )) + r')+', Other),
+
+            (r'\{', Punctuation, '#push'),
+            (r'\}', Punctuation, '#pop'),
+        ],
+    }
+
+
+class RagelEmbeddedLexer(RegexLexer):
+    """
+    A lexer for `Ragel`_ embedded in a host language file.
+
+    This will only highlight Ragel statements. If you want host language
+    highlighting then call the language-specific Ragel lexer.
+
+    ..
versionadded:: 1.1 + """ + + name = 'Embedded Ragel' + aliases = ['ragel-em'] + filenames = ['*.rl'] + + tokens = { + 'root': [ + (r'(' + r'|'.join(( # keep host code in largest possible chunks + r'[^%\'"/#]+', # exclude unsafe characters + r'%(?=[^%]|$)', # a single % sign is okay, just not 2 of them + + # strings and comments may safely contain unsafe characters + r'"(\\\\|\\"|[^"])*"', # double quote string + r"'(\\\\|\\'|[^'])*'", # single quote string + r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment + r'//.*$\n?', # single line comment + r'\#.*$\n?', # ruby/ragel comment + r'/(?!\*)(\\\\|\\/|[^/])*/', # regular expression + + # / is safe now that we've handled regex and javadoc comments + r'/', + )) + r')+', Other), + + # Single Line FSM. + # Please don't put a quoted newline in a single line FSM. + # That's just mean. It will break this. + (r'(%%)(?![{%])(.*)($|;)(\n?)', bygroups(Punctuation, + using(RagelLexer), + Punctuation, Text)), + + # Multi Line FSM. + (r'(%%%%|%%)\{', Punctuation, 'multi-line-fsm'), + ], + 'multi-line-fsm': [ + (r'(' + r'|'.join(( # keep ragel code in largest possible chunks. + r'(' + r'|'.join(( + r'[^}\'"\[/#]', # exclude unsafe characters + r'\}(?=[^%]|$)', # } is okay as long as it's not followed by % + r'\}%(?=[^%]|$)', # ...well, one %'s okay, just not two... + r'[^\\]\\[{}]', # ...and } is okay if it's escaped + + # allow / if it's preceded with one of these symbols + # (ragel EOF actions) + r'(>|\$|%|<|@|<>)/', + + # specifically allow regex followed immediately by * + # so it doesn't get mistaken for a comment + r'/(?!\*)(\\\\|\\/|[^/])*/\*', + + # allow / as long as it's not followed by another / or by a * + r'/(?=[^/*]|$)', + + # We want to match as many of these as we can in one block. + # Not sure if we need the + sign here, + # does it help performance? + )) + r')+', + + # strings and comments may safely contain unsafe characters + r'"(\\\\|\\"|[^"])*"', # double quote string + r"'(\\\\|\\'|[^'])*'", # single quote string + r"\[(\\\\|\\\]|[^\]])*\]", # square bracket literal + r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment + r'//.*$\n?', # single line comment + r'\#.*$\n?', # ruby/ragel comment + )) + r')+', using(RagelLexer)), + + (r'\}%%', Punctuation, '#pop'), + ] + } + + def analyse_text(text): + return '@LANG: indep' in text + + +class RagelRubyLexer(DelegatingLexer): + """ + A lexer for `Ragel`_ in a Ruby host file. + + .. versionadded:: 1.1 + """ + + name = 'Ragel in Ruby Host' + aliases = ['ragel-ruby', 'ragel-rb'] + filenames = ['*.rl'] + + def __init__(self, **options): + super().__init__(RubyLexer, RagelEmbeddedLexer, **options) + + def analyse_text(text): + return '@LANG: ruby' in text + + +class RagelCLexer(DelegatingLexer): + """ + A lexer for `Ragel`_ in a C host file. + + .. versionadded:: 1.1 + """ + + name = 'Ragel in C Host' + aliases = ['ragel-c'] + filenames = ['*.rl'] + + def __init__(self, **options): + super().__init__(CLexer, RagelEmbeddedLexer, **options) + + def analyse_text(text): + return '@LANG: c' in text + + +class RagelDLexer(DelegatingLexer): + """ + A lexer for `Ragel`_ in a D host file. + + .. versionadded:: 1.1 + """ + + name = 'Ragel in D Host' + aliases = ['ragel-d'] + filenames = ['*.rl'] + + def __init__(self, **options): + super().__init__(DLexer, RagelEmbeddedLexer, **options) + + def analyse_text(text): + return '@LANG: d' in text + + +class RagelCppLexer(DelegatingLexer): + """ + A lexer for `Ragel`_ in a CPP host file. + + .. 
versionadded:: 1.1 + """ + + name = 'Ragel in CPP Host' + aliases = ['ragel-cpp'] + filenames = ['*.rl'] + + def __init__(self, **options): + super().__init__(CppLexer, RagelEmbeddedLexer, **options) + + def analyse_text(text): + return '@LANG: c++' in text + + +class RagelObjectiveCLexer(DelegatingLexer): + """ + A lexer for `Ragel`_ in an Objective C host file. + + .. versionadded:: 1.1 + """ + + name = 'Ragel in Objective C Host' + aliases = ['ragel-objc'] + filenames = ['*.rl'] + + def __init__(self, **options): + super().__init__(ObjectiveCLexer, RagelEmbeddedLexer, **options) + + def analyse_text(text): + return '@LANG: objc' in text + + +class RagelJavaLexer(DelegatingLexer): + """ + A lexer for `Ragel`_ in a Java host file. + + .. versionadded:: 1.1 + """ + + name = 'Ragel in Java Host' + aliases = ['ragel-java'] + filenames = ['*.rl'] + + def __init__(self, **options): + super().__init__(JavaLexer, RagelEmbeddedLexer, **options) + + def analyse_text(text): + return '@LANG: java' in text + + +class AntlrLexer(RegexLexer): + """ + Generic `ANTLR`_ Lexer. + Should not be called directly, instead + use DelegatingLexer for your target language. + + .. versionadded:: 1.1 + + .. _ANTLR: http://www.antlr.org/ + """ + + name = 'ANTLR' + aliases = ['antlr'] + filenames = [] + + _id = r'[A-Za-z]\w*' + _TOKEN_REF = r'[A-Z]\w*' + _RULE_REF = r'[a-z]\w*' + _STRING_LITERAL = r'\'(?:\\\\|\\\'|[^\']*)\'' + _INT = r'[0-9]+' + + tokens = { + 'whitespace': [ + (r'\s+', Whitespace), + ], + 'comments': [ + (r'//.*$', Comment), + (r'/\*(.|\n)*?\*/', Comment), + ], + 'root': [ + include('whitespace'), + include('comments'), + + (r'(lexer|parser|tree)?(\s*)(grammar\b)(\s*)(' + _id + ')(;)', + bygroups(Keyword, Whitespace, Keyword, Whitespace, Name.Class, + Punctuation)), + # optionsSpec + (r'options\b', Keyword, 'options'), + # tokensSpec + (r'tokens\b', Keyword, 'tokens'), + # attrScope + (r'(scope)(\s*)(' + _id + r')(\s*)(\{)', + bygroups(Keyword, Whitespace, Name.Variable, Whitespace, + Punctuation), 'action'), + # exception + (r'(catch|finally)\b', Keyword, 'exception'), + # action + (r'(@' + _id + r')(\s*)(::)?(\s*)(' + _id + r')(\s*)(\{)', + bygroups(Name.Label, Whitespace, Punctuation, Whitespace, + Name.Label, Whitespace, Punctuation), 'action'), + # rule + (r'((?:protected|private|public|fragment)\b)?(\s*)(' + _id + ')(!)?', + bygroups(Keyword, Whitespace, Name.Label, Punctuation), + ('rule-alts', 'rule-prelims')), + ], + 'exception': [ + (r'\n', Whitespace, '#pop'), + (r'\s', Whitespace), + include('comments'), + + (r'\[', Punctuation, 'nested-arg-action'), + (r'\{', Punctuation, 'action'), + ], + 'rule-prelims': [ + include('whitespace'), + include('comments'), + + (r'returns\b', Keyword), + (r'\[', Punctuation, 'nested-arg-action'), + (r'\{', Punctuation, 'action'), + # throwsSpec + (r'(throws)(\s+)(' + _id + ')', + bygroups(Keyword, Whitespace, Name.Label)), + (r'(,)(\s*)(' + _id + ')', + bygroups(Punctuation, Whitespace, Name.Label)), # Additional throws + # optionsSpec + (r'options\b', Keyword, 'options'), + # ruleScopeSpec - scope followed by target language code or name of action + # TODO finish implementing other possibilities for scope + # L173 ANTLRv3.g from ANTLR book + (r'(scope)(\s+)(\{)', bygroups(Keyword, Whitespace, Punctuation), + 'action'), + (r'(scope)(\s+)(' + _id + r')(\s*)(;)', + bygroups(Keyword, Whitespace, Name.Label, Whitespace, Punctuation)), + # ruleAction + (r'(@' + _id + r')(\s*)(\{)', + bygroups(Name.Label, Whitespace, Punctuation), 'action'), + # finished prelims, 
go to rule alts! + (r':', Punctuation, '#pop') + ], + 'rule-alts': [ + include('whitespace'), + include('comments'), + + # These might need to go in a separate 'block' state triggered by ( + (r'options\b', Keyword, 'options'), + (r':', Punctuation), + + # literals + (r"'(\\\\|\\'|[^'])*'", String), + (r'"(\\\\|\\"|[^"])*"', String), + (r'<<([^>]|>[^>])>>', String), + # identifiers + # Tokens start with capital letter. + (r'\$?[A-Z_]\w*', Name.Constant), + # Rules start with small letter. + (r'\$?[a-z_]\w*', Name.Variable), + # operators + (r'(\+|\||->|=>|=|\(|\)|\.\.|\.|\?|\*|\^|!|\#|~)', Operator), + (r',', Punctuation), + (r'\[', Punctuation, 'nested-arg-action'), + (r'\{', Punctuation, 'action'), + (r';', Punctuation, '#pop') + ], + 'tokens': [ + include('whitespace'), + include('comments'), + (r'\{', Punctuation), + (r'(' + _TOKEN_REF + r')(\s*)(=)?(\s*)(' + _STRING_LITERAL + + r')?(\s*)(;)', + bygroups(Name.Label, Whitespace, Punctuation, Whitespace, + String, Whitespace, Punctuation)), + (r'\}', Punctuation, '#pop'), + ], + 'options': [ + include('whitespace'), + include('comments'), + (r'\{', Punctuation), + (r'(' + _id + r')(\s*)(=)(\s*)(' + + '|'.join((_id, _STRING_LITERAL, _INT, r'\*')) + r')(\s*)(;)', + bygroups(Name.Variable, Whitespace, Punctuation, Whitespace, + Text, Whitespace, Punctuation)), + (r'\}', Punctuation, '#pop'), + ], + 'action': [ + (r'(' + r'|'.join(( # keep host code in largest possible chunks + r'[^${}\'"/\\]+', # exclude unsafe characters + + # strings and comments may safely contain unsafe characters + r'"(\\\\|\\"|[^"])*"', # double quote string + r"'(\\\\|\\'|[^'])*'", # single quote string + r'//.*$\n?', # single line comment + r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment + + # regular expression: There's no reason for it to start + # with a * and this stops confusion with comments. + r'/(?!\*)(\\\\|\\/|[^/])*/', + + # backslashes are okay, as long as we are not backslashing a % + r'\\(?!%)', + + # Now that we've handled regex and javadoc comments + # it's safe to let / through. + r'/', + )) + r')+', Other), + (r'(\\)(%)', bygroups(Punctuation, Other)), + (r'(\$[a-zA-Z]+)(\.?)(text|value)?', + bygroups(Name.Variable, Punctuation, Name.Property)), + (r'\{', Punctuation, '#push'), + (r'\}', Punctuation, '#pop'), + ], + 'nested-arg-action': [ + (r'(' + r'|'.join(( # keep host code in largest possible chunks. + r'[^$\[\]\'"/]+', # exclude unsafe characters + + # strings and comments may safely contain unsafe characters + r'"(\\\\|\\"|[^"])*"', # double quote string + r"'(\\\\|\\'|[^'])*'", # single quote string + r'//.*$\n?', # single line comment + r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment + + # regular expression: There's no reason for it to start + # with a * and this stops confusion with comments. + r'/(?!\*)(\\\\|\\/|[^/])*/', + + # Now that we've handled regex and javadoc comments + # it's safe to let / through. + r'/', + )) + r')+', Other), + + + (r'\[', Punctuation, '#push'), + (r'\]', Punctuation, '#pop'), + (r'(\$[a-zA-Z]+)(\.?)(text|value)?', + bygroups(Name.Variable, Punctuation, Name.Property)), + (r'(\\\\|\\\]|\\\[|[^\[\]])+', Other), + ] + } + + def analyse_text(text): + return re.search(r'^\s*grammar\s+[a-zA-Z0-9]+\s*;', text, re.M) + + +# http://www.antlr.org/wiki/display/ANTLR3/Code+Generation+Targets + +class AntlrCppLexer(DelegatingLexer): + """ + `ANTLR`_ with CPP Target + + .. 
versionadded:: 1.1
+    """
+
+    name = 'ANTLR With CPP Target'
+    aliases = ['antlr-cpp']
+    filenames = ['*.G', '*.g']
+
+    def __init__(self, **options):
+        super().__init__(CppLexer, AntlrLexer, **options)
+
+    def analyse_text(text):
+        return AntlrLexer.analyse_text(text) and \
+            re.search(r'^\s*language\s*=\s*C\s*;', text, re.M)
+
+
+class AntlrObjectiveCLexer(DelegatingLexer):
+    """
+    `ANTLR`_ with Objective-C Target
+
+    .. versionadded:: 1.1
+    """
+
+    name = 'ANTLR With ObjectiveC Target'
+    aliases = ['antlr-objc']
+    filenames = ['*.G', '*.g']
+
+    def __init__(self, **options):
+        super().__init__(ObjectiveCLexer, AntlrLexer, **options)
+
+    def analyse_text(text):
+        return AntlrLexer.analyse_text(text) and \
+            re.search(r'^\s*language\s*=\s*ObjC\s*;', text)
+
+
+class AntlrCSharpLexer(DelegatingLexer):
+    """
+    `ANTLR`_ with C# Target
+
+    .. versionadded:: 1.1
+    """
+
+    name = 'ANTLR With C# Target'
+    aliases = ['antlr-csharp', 'antlr-c#']
+    filenames = ['*.G', '*.g']
+
+    def __init__(self, **options):
+        super().__init__(CSharpLexer, AntlrLexer, **options)
+
+    def analyse_text(text):
+        return AntlrLexer.analyse_text(text) and \
+            re.search(r'^\s*language\s*=\s*CSharp2\s*;', text, re.M)
+
+
+class AntlrPythonLexer(DelegatingLexer):
+    """
+    `ANTLR`_ with Python Target
+
+    .. versionadded:: 1.1
+    """
+
+    name = 'ANTLR With Python Target'
+    aliases = ['antlr-python']
+    filenames = ['*.G', '*.g']
+
+    def __init__(self, **options):
+        super().__init__(PythonLexer, AntlrLexer, **options)
+
+    def analyse_text(text):
+        return AntlrLexer.analyse_text(text) and \
+            re.search(r'^\s*language\s*=\s*Python\s*;', text, re.M)
+
+
+class AntlrJavaLexer(DelegatingLexer):
+    """
+    `ANTLR`_ with Java Target
+
+    .. versionadded:: 1.1
+    """
+
+    name = 'ANTLR With Java Target'
+    aliases = ['antlr-java']
+    filenames = ['*.G', '*.g']
+
+    def __init__(self, **options):
+        super().__init__(JavaLexer, AntlrLexer, **options)
+
+    def analyse_text(text):
+        # Antlr language is Java by default
+        return AntlrLexer.analyse_text(text) and 0.9
+
+
+class AntlrRubyLexer(DelegatingLexer):
+    """
+    `ANTLR`_ with Ruby Target
+
+    .. versionadded:: 1.1
+    """
+
+    name = 'ANTLR With Ruby Target'
+    aliases = ['antlr-ruby', 'antlr-rb']
+    filenames = ['*.G', '*.g']
+
+    def __init__(self, **options):
+        super().__init__(RubyLexer, AntlrLexer, **options)
+
+    def analyse_text(text):
+        return AntlrLexer.analyse_text(text) and \
+            re.search(r'^\s*language\s*=\s*Ruby\s*;', text, re.M)
+
+
+class AntlrPerlLexer(DelegatingLexer):
+    """
+    `ANTLR`_ with Perl Target
+
+    .. versionadded:: 1.1
+    """
+
+    name = 'ANTLR With Perl Target'
+    aliases = ['antlr-perl']
+    filenames = ['*.G', '*.g']
+
+    def __init__(self, **options):
+        super().__init__(PerlLexer, AntlrLexer, **options)
+
+    def analyse_text(text):
+        return AntlrLexer.analyse_text(text) and \
+            re.search(r'^\s*language\s*=\s*Perl5\s*;', text, re.M)
+
+
+class AntlrActionScriptLexer(DelegatingLexer):
+    """
+    `ANTLR`_ with ActionScript Target
+
+    .. versionadded:: 1.1
+    """
+
+    name = 'ANTLR With ActionScript Target'
+    aliases = ['antlr-as', 'antlr-actionscript']
+    filenames = ['*.G', '*.g']
+
+    def __init__(self, **options):
+        from pygments.lexers.actionscript import ActionScriptLexer
+        super().__init__(ActionScriptLexer, AntlrLexer, **options)
+
+    def analyse_text(text):
+        return AntlrLexer.analyse_text(text) and \
+            re.search(r'^\s*language\s*=\s*ActionScript\s*;', text, re.M)
+
+
+class TreetopBaseLexer(RegexLexer):
+    """
+    A base lexer for `Treetop `_ grammars.
+ Not for direct use; use TreetopLexer instead. + + .. versionadded:: 1.6 + """ + + tokens = { + 'root': [ + include('space'), + (r'require[ \t]+[^\n\r]+[\n\r]', Other), + (r'module\b', Keyword.Namespace, 'module'), + (r'grammar\b', Keyword, 'grammar'), + ], + 'module': [ + include('space'), + include('end'), + (r'module\b', Keyword, '#push'), + (r'grammar\b', Keyword, 'grammar'), + (r'[A-Z]\w*(?:::[A-Z]\w*)*', Name.Namespace), + ], + 'grammar': [ + include('space'), + include('end'), + (r'rule\b', Keyword, 'rule'), + (r'include\b', Keyword, 'include'), + (r'[A-Z]\w*', Name), + ], + 'include': [ + include('space'), + (r'[A-Z]\w*(?:::[A-Z]\w*)*', Name.Class, '#pop'), + ], + 'rule': [ + include('space'), + include('end'), + (r'"(\\\\|\\"|[^"])*"', String.Double), + (r"'(\\\\|\\'|[^'])*'", String.Single), + (r'([A-Za-z_]\w*)(:)', bygroups(Name.Label, Punctuation)), + (r'[A-Za-z_]\w*', Name), + (r'[()]', Punctuation), + (r'[?+*/&!~]', Operator), + (r'\[(?:\\.|\[:\^?[a-z]+:\]|[^\\\]])+\]', String.Regex), + (r'([0-9]*)(\.\.)([0-9]*)', + bygroups(Number.Integer, Operator, Number.Integer)), + (r'(<)([^>]+)(>)', bygroups(Punctuation, Name.Class, Punctuation)), + (r'\{', Punctuation, 'inline_module'), + (r'\.', String.Regex), + ], + 'inline_module': [ + (r'\{', Other, 'ruby'), + (r'\}', Punctuation, '#pop'), + (r'[^{}]+', Other), + ], + 'ruby': [ + (r'\{', Other, '#push'), + (r'\}', Other, '#pop'), + (r'[^{}]+', Other), + ], + 'space': [ + (r'[ \t\n\r]+', Whitespace), + (r'#[^\n]*', Comment.Single), + ], + 'end': [ + (r'end\b', Keyword, '#pop'), + ], + } + + +class TreetopLexer(DelegatingLexer): + """ + A lexer for `Treetop `_ grammars. + + .. versionadded:: 1.6 + """ + + name = 'Treetop' + aliases = ['treetop'] + filenames = ['*.treetop', '*.tt'] + + def __init__(self, **options): + super().__init__(RubyLexer, TreetopBaseLexer, **options) + + +class EbnfLexer(RegexLexer): + """ + Lexer for `ISO/IEC 14977 EBNF + `_ + grammars. + + .. versionadded:: 2.0 + """ + + name = 'EBNF' + aliases = ['ebnf'] + filenames = ['*.ebnf'] + mimetypes = ['text/x-ebnf'] + + tokens = { + 'root': [ + include('whitespace'), + include('comment_start'), + include('identifier'), + (r'=', Operator, 'production'), + ], + 'production': [ + include('whitespace'), + include('comment_start'), + include('identifier'), + (r'"[^"]*"', String.Double), + (r"'[^']*'", String.Single), + (r'(\?[^?]*\?)', Name.Entity), + (r'[\[\]{}(),|]', Punctuation), + (r'-', Operator), + (r';', Punctuation, '#pop'), + (r'\.', Punctuation, '#pop'), + ], + 'whitespace': [ + (r'\s+', Text), + ], + 'comment_start': [ + (r'\(\*', Comment.Multiline, 'comment'), + ], + 'comment': [ + (r'[^*)]', Comment.Multiline), + include('comment_start'), + (r'\*\)', Comment.Multiline, '#pop'), + (r'[*)]', Comment.Multiline), + ], + 'identifier': [ + (r'([a-zA-Z][\w \-]*)', Keyword), + ], + } diff --git a/pygments/lexers/pascal.py b/pygments/lexers/pascal.py old mode 100644 new mode 100755 index d4b43fd..93f7d44 --- a/pygments/lexers/pascal.py +++ b/pygments/lexers/pascal.py @@ -1,644 +1,644 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.pascal - ~~~~~~~~~~~~~~~~~~~~~~ - - Lexers for Pascal family languages. - - :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. 
-""" - -import re - -from pygments.lexer import Lexer, RegexLexer, include, bygroups, words, \ - using, this, default -from pygments.util import get_bool_opt, get_list_opt -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ - Number, Punctuation, Error -from pygments.scanner import Scanner - -# compatibility import -from pygments.lexers.modula2 import Modula2Lexer - -__all__ = ['DelphiLexer', 'AdaLexer'] - - -class DelphiLexer(Lexer): - """ - For `Delphi `_ (Borland Object Pascal), - Turbo Pascal and Free Pascal source code. - - Additional options accepted: - - `turbopascal` - Highlight Turbo Pascal specific keywords (default: ``True``). - `delphi` - Highlight Borland Delphi specific keywords (default: ``True``). - `freepascal` - Highlight Free Pascal specific keywords (default: ``True``). - `units` - A list of units that should be considered builtin, supported are - ``System``, ``SysUtils``, ``Classes`` and ``Math``. - Default is to consider all of them builtin. - """ - name = 'Delphi' - aliases = ['delphi', 'pas', 'pascal', 'objectpascal'] - filenames = ['*.pas', '*.dpr'] - mimetypes = ['text/x-pascal'] - - TURBO_PASCAL_KEYWORDS = ( - 'absolute', 'and', 'array', 'asm', 'begin', 'break', 'case', - 'const', 'constructor', 'continue', 'destructor', 'div', 'do', - 'downto', 'else', 'end', 'file', 'for', 'function', 'goto', - 'if', 'implementation', 'in', 'inherited', 'inline', 'interface', - 'label', 'mod', 'nil', 'not', 'object', 'of', 'on', 'operator', - 'or', 'packed', 'procedure', 'program', 'record', 'reintroduce', - 'repeat', 'self', 'set', 'shl', 'shr', 'string', 'then', 'to', - 'type', 'unit', 'until', 'uses', 'var', 'while', 'with', 'xor' - ) - - DELPHI_KEYWORDS = ( - 'as', 'class', 'except', 'exports', 'finalization', 'finally', - 'initialization', 'is', 'library', 'on', 'property', 'raise', - 'threadvar', 'try' - ) - - FREE_PASCAL_KEYWORDS = ( - 'dispose', 'exit', 'false', 'new', 'true' - ) - - BLOCK_KEYWORDS = { - 'begin', 'class', 'const', 'constructor', 'destructor', 'end', - 'finalization', 'function', 'implementation', 'initialization', - 'label', 'library', 'operator', 'procedure', 'program', 'property', - 'record', 'threadvar', 'type', 'unit', 'uses', 'var' - } - - FUNCTION_MODIFIERS = { - 'alias', 'cdecl', 'export', 'inline', 'interrupt', 'nostackframe', - 'pascal', 'register', 'safecall', 'softfloat', 'stdcall', - 'varargs', 'name', 'dynamic', 'near', 'virtual', 'external', - 'override', 'assembler' - } - - # XXX: those aren't global. but currently we know no way for defining - # them just for the type context. 
- DIRECTIVES = { - 'absolute', 'abstract', 'assembler', 'cppdecl', 'default', 'far', - 'far16', 'forward', 'index', 'oldfpccall', 'private', 'protected', - 'published', 'public' - } - - BUILTIN_TYPES = { - 'ansichar', 'ansistring', 'bool', 'boolean', 'byte', 'bytebool', - 'cardinal', 'char', 'comp', 'currency', 'double', 'dword', - 'extended', 'int64', 'integer', 'iunknown', 'longbool', 'longint', - 'longword', 'pansichar', 'pansistring', 'pbool', 'pboolean', - 'pbyte', 'pbytearray', 'pcardinal', 'pchar', 'pcomp', 'pcurrency', - 'pdate', 'pdatetime', 'pdouble', 'pdword', 'pextended', 'phandle', - 'pint64', 'pinteger', 'plongint', 'plongword', 'pointer', - 'ppointer', 'pshortint', 'pshortstring', 'psingle', 'psmallint', - 'pstring', 'pvariant', 'pwidechar', 'pwidestring', 'pword', - 'pwordarray', 'pwordbool', 'real', 'real48', 'shortint', - 'shortstring', 'single', 'smallint', 'string', 'tclass', 'tdate', - 'tdatetime', 'textfile', 'thandle', 'tobject', 'ttime', 'variant', - 'widechar', 'widestring', 'word', 'wordbool' - } - - BUILTIN_UNITS = { - 'System': ( - 'abs', 'acquireexceptionobject', 'addr', 'ansitoutf8', - 'append', 'arctan', 'assert', 'assigned', 'assignfile', - 'beginthread', 'blockread', 'blockwrite', 'break', 'chdir', - 'chr', 'close', 'closefile', 'comptocurrency', 'comptodouble', - 'concat', 'continue', 'copy', 'cos', 'dec', 'delete', - 'dispose', 'doubletocomp', 'endthread', 'enummodules', - 'enumresourcemodules', 'eof', 'eoln', 'erase', 'exceptaddr', - 'exceptobject', 'exclude', 'exit', 'exp', 'filepos', 'filesize', - 'fillchar', 'finalize', 'findclasshinstance', 'findhinstance', - 'findresourcehinstance', 'flush', 'frac', 'freemem', - 'get8087cw', 'getdir', 'getlasterror', 'getmem', - 'getmemorymanager', 'getmodulefilename', 'getvariantmanager', - 'halt', 'hi', 'high', 'inc', 'include', 'initialize', 'insert', - 'int', 'ioresult', 'ismemorymanagerset', 'isvariantmanagerset', - 'length', 'ln', 'lo', 'low', 'mkdir', 'move', 'new', 'odd', - 'olestrtostring', 'olestrtostrvar', 'ord', 'paramcount', - 'paramstr', 'pi', 'pos', 'pred', 'ptr', 'pucs4chars', 'random', - 'randomize', 'read', 'readln', 'reallocmem', - 'releaseexceptionobject', 'rename', 'reset', 'rewrite', 'rmdir', - 'round', 'runerror', 'seek', 'seekeof', 'seekeoln', - 'set8087cw', 'setlength', 'setlinebreakstyle', - 'setmemorymanager', 'setstring', 'settextbuf', - 'setvariantmanager', 'sin', 'sizeof', 'slice', 'sqr', 'sqrt', - 'str', 'stringofchar', 'stringtoolestr', 'stringtowidechar', - 'succ', 'swap', 'trunc', 'truncate', 'typeinfo', - 'ucs4stringtowidestring', 'unicodetoutf8', 'uniquestring', - 'upcase', 'utf8decode', 'utf8encode', 'utf8toansi', - 'utf8tounicode', 'val', 'vararrayredim', 'varclear', - 'widecharlentostring', 'widecharlentostrvar', - 'widechartostring', 'widechartostrvar', - 'widestringtoucs4string', 'write', 'writeln' - ), - 'SysUtils': ( - 'abort', 'addexitproc', 'addterminateproc', 'adjustlinebreaks', - 'allocmem', 'ansicomparefilename', 'ansicomparestr', - 'ansicomparetext', 'ansidequotedstr', 'ansiextractquotedstr', - 'ansilastchar', 'ansilowercase', 'ansilowercasefilename', - 'ansipos', 'ansiquotedstr', 'ansisamestr', 'ansisametext', - 'ansistrcomp', 'ansistricomp', 'ansistrlastchar', 'ansistrlcomp', - 'ansistrlicomp', 'ansistrlower', 'ansistrpos', 'ansistrrscan', - 'ansistrscan', 'ansistrupper', 'ansiuppercase', - 'ansiuppercasefilename', 'appendstr', 'assignstr', 'beep', - 'booltostr', 'bytetocharindex', 'bytetocharlen', 'bytetype', - 'callterminateprocs', 'changefileext', 'charlength', - 
'chartobyteindex', 'chartobytelen', 'comparemem', 'comparestr', - 'comparetext', 'createdir', 'createguid', 'currentyear', - 'currtostr', 'currtostrf', 'date', 'datetimetofiledate', - 'datetimetostr', 'datetimetostring', 'datetimetosystemtime', - 'datetimetotimestamp', 'datetostr', 'dayofweek', 'decodedate', - 'decodedatefully', 'decodetime', 'deletefile', 'directoryexists', - 'diskfree', 'disksize', 'disposestr', 'encodedate', 'encodetime', - 'exceptionerrormessage', 'excludetrailingbackslash', - 'excludetrailingpathdelimiter', 'expandfilename', - 'expandfilenamecase', 'expanduncfilename', 'extractfiledir', - 'extractfiledrive', 'extractfileext', 'extractfilename', - 'extractfilepath', 'extractrelativepath', 'extractshortpathname', - 'fileage', 'fileclose', 'filecreate', 'filedatetodatetime', - 'fileexists', 'filegetattr', 'filegetdate', 'fileisreadonly', - 'fileopen', 'fileread', 'filesearch', 'fileseek', 'filesetattr', - 'filesetdate', 'filesetreadonly', 'filewrite', 'finalizepackage', - 'findclose', 'findcmdlineswitch', 'findfirst', 'findnext', - 'floattocurr', 'floattodatetime', 'floattodecimal', 'floattostr', - 'floattostrf', 'floattotext', 'floattotextfmt', 'fmtloadstr', - 'fmtstr', 'forcedirectories', 'format', 'formatbuf', 'formatcurr', - 'formatdatetime', 'formatfloat', 'freeandnil', 'getcurrentdir', - 'getenvironmentvariable', 'getfileversion', 'getformatsettings', - 'getlocaleformatsettings', 'getmodulename', 'getpackagedescription', - 'getpackageinfo', 'gettime', 'guidtostring', 'incamonth', - 'includetrailingbackslash', 'includetrailingpathdelimiter', - 'incmonth', 'initializepackage', 'interlockeddecrement', - 'interlockedexchange', 'interlockedexchangeadd', - 'interlockedincrement', 'inttohex', 'inttostr', 'isdelimiter', - 'isequalguid', 'isleapyear', 'ispathdelimiter', 'isvalidident', - 'languages', 'lastdelimiter', 'loadpackage', 'loadstr', - 'lowercase', 'msecstotimestamp', 'newstr', 'nextcharindex', 'now', - 'outofmemoryerror', 'quotedstr', 'raiselastoserror', - 'raiselastwin32error', 'removedir', 'renamefile', 'replacedate', - 'replacetime', 'safeloadlibrary', 'samefilename', 'sametext', - 'setcurrentdir', 'showexception', 'sleep', 'stralloc', 'strbufsize', - 'strbytetype', 'strcat', 'strcharlength', 'strcomp', 'strcopy', - 'strdispose', 'strecopy', 'strend', 'strfmt', 'stricomp', - 'stringreplace', 'stringtoguid', 'strlcat', 'strlcomp', 'strlcopy', - 'strlen', 'strlfmt', 'strlicomp', 'strlower', 'strmove', 'strnew', - 'strnextchar', 'strpas', 'strpcopy', 'strplcopy', 'strpos', - 'strrscan', 'strscan', 'strtobool', 'strtobooldef', 'strtocurr', - 'strtocurrdef', 'strtodate', 'strtodatedef', 'strtodatetime', - 'strtodatetimedef', 'strtofloat', 'strtofloatdef', 'strtoint', - 'strtoint64', 'strtoint64def', 'strtointdef', 'strtotime', - 'strtotimedef', 'strupper', 'supports', 'syserrormessage', - 'systemtimetodatetime', 'texttofloat', 'time', 'timestamptodatetime', - 'timestamptomsecs', 'timetostr', 'trim', 'trimleft', 'trimright', - 'tryencodedate', 'tryencodetime', 'tryfloattocurr', 'tryfloattodatetime', - 'trystrtobool', 'trystrtocurr', 'trystrtodate', 'trystrtodatetime', - 'trystrtofloat', 'trystrtoint', 'trystrtoint64', 'trystrtotime', - 'unloadpackage', 'uppercase', 'widecomparestr', 'widecomparetext', - 'widefmtstr', 'wideformat', 'wideformatbuf', 'widelowercase', - 'widesamestr', 'widesametext', 'wideuppercase', 'win32check', - 'wraptext' - ), - 'Classes': ( - 'activateclassgroup', 'allocatehwnd', 'bintohex', 'checksynchronize', - 'collectionsequal', 
'countgenerations', 'deallocatehwnd', 'equalrect', - 'extractstrings', 'findclass', 'findglobalcomponent', 'getclass', - 'groupdescendantswith', 'hextobin', 'identtoint', - 'initinheritedcomponent', 'inttoident', 'invalidpoint', - 'isuniqueglobalcomponentname', 'linestart', 'objectbinarytotext', - 'objectresourcetotext', 'objecttexttobinary', 'objecttexttoresource', - 'pointsequal', 'readcomponentres', 'readcomponentresex', - 'readcomponentresfile', 'rect', 'registerclass', 'registerclassalias', - 'registerclasses', 'registercomponents', 'registerintegerconsts', - 'registernoicon', 'registernonactivex', 'smallpoint', 'startclassgroup', - 'teststreamformat', 'unregisterclass', 'unregisterclasses', - 'unregisterintegerconsts', 'unregistermoduleclasses', - 'writecomponentresfile' - ), - 'Math': ( - 'arccos', 'arccosh', 'arccot', 'arccoth', 'arccsc', 'arccsch', 'arcsec', - 'arcsech', 'arcsin', 'arcsinh', 'arctan2', 'arctanh', 'ceil', - 'comparevalue', 'cosecant', 'cosh', 'cot', 'cotan', 'coth', 'csc', - 'csch', 'cycletodeg', 'cycletograd', 'cycletorad', 'degtocycle', - 'degtograd', 'degtorad', 'divmod', 'doubledecliningbalance', - 'ensurerange', 'floor', 'frexp', 'futurevalue', 'getexceptionmask', - 'getprecisionmode', 'getroundmode', 'gradtocycle', 'gradtodeg', - 'gradtorad', 'hypot', 'inrange', 'interestpayment', 'interestrate', - 'internalrateofreturn', 'intpower', 'isinfinite', 'isnan', 'iszero', - 'ldexp', 'lnxp1', 'log10', 'log2', 'logn', 'max', 'maxintvalue', - 'maxvalue', 'mean', 'meanandstddev', 'min', 'minintvalue', 'minvalue', - 'momentskewkurtosis', 'netpresentvalue', 'norm', 'numberofperiods', - 'payment', 'periodpayment', 'poly', 'popnstddev', 'popnvariance', - 'power', 'presentvalue', 'radtocycle', 'radtodeg', 'radtograd', - 'randg', 'randomrange', 'roundto', 'samevalue', 'sec', 'secant', - 'sech', 'setexceptionmask', 'setprecisionmode', 'setroundmode', - 'sign', 'simpleroundto', 'sincos', 'sinh', 'slndepreciation', 'stddev', - 'sum', 'sumint', 'sumofsquares', 'sumsandsquares', 'syddepreciation', - 'tan', 'tanh', 'totalvariance', 'variance' - ) - } - - ASM_REGISTERS = { - 'ah', 'al', 'ax', 'bh', 'bl', 'bp', 'bx', 'ch', 'cl', 'cr0', - 'cr1', 'cr2', 'cr3', 'cr4', 'cs', 'cx', 'dh', 'di', 'dl', 'dr0', - 'dr1', 'dr2', 'dr3', 'dr4', 'dr5', 'dr6', 'dr7', 'ds', 'dx', - 'eax', 'ebp', 'ebx', 'ecx', 'edi', 'edx', 'es', 'esi', 'esp', - 'fs', 'gs', 'mm0', 'mm1', 'mm2', 'mm3', 'mm4', 'mm5', 'mm6', - 'mm7', 'si', 'sp', 'ss', 'st0', 'st1', 'st2', 'st3', 'st4', 'st5', - 'st6', 'st7', 'xmm0', 'xmm1', 'xmm2', 'xmm3', 'xmm4', 'xmm5', - 'xmm6', 'xmm7' - } - - ASM_INSTRUCTIONS = { - 'aaa', 'aad', 'aam', 'aas', 'adc', 'add', 'and', 'arpl', 'bound', - 'bsf', 'bsr', 'bswap', 'bt', 'btc', 'btr', 'bts', 'call', 'cbw', - 'cdq', 'clc', 'cld', 'cli', 'clts', 'cmc', 'cmova', 'cmovae', - 'cmovb', 'cmovbe', 'cmovc', 'cmovcxz', 'cmove', 'cmovg', - 'cmovge', 'cmovl', 'cmovle', 'cmovna', 'cmovnae', 'cmovnb', - 'cmovnbe', 'cmovnc', 'cmovne', 'cmovng', 'cmovnge', 'cmovnl', - 'cmovnle', 'cmovno', 'cmovnp', 'cmovns', 'cmovnz', 'cmovo', - 'cmovp', 'cmovpe', 'cmovpo', 'cmovs', 'cmovz', 'cmp', 'cmpsb', - 'cmpsd', 'cmpsw', 'cmpxchg', 'cmpxchg486', 'cmpxchg8b', 'cpuid', - 'cwd', 'cwde', 'daa', 'das', 'dec', 'div', 'emms', 'enter', 'hlt', - 'ibts', 'icebp', 'idiv', 'imul', 'in', 'inc', 'insb', 'insd', - 'insw', 'int', 'int01', 'int03', 'int1', 'int3', 'into', 'invd', - 'invlpg', 'iret', 'iretd', 'iretw', 'ja', 'jae', 'jb', 'jbe', - 'jc', 'jcxz', 'jcxz', 'je', 'jecxz', 'jg', 'jge', 'jl', 'jle', - 'jmp', 'jna', 'jnae', 'jnb', 
'jnbe', 'jnc', 'jne', 'jng', 'jnge', - 'jnl', 'jnle', 'jno', 'jnp', 'jns', 'jnz', 'jo', 'jp', 'jpe', - 'jpo', 'js', 'jz', 'lahf', 'lar', 'lcall', 'lds', 'lea', 'leave', - 'les', 'lfs', 'lgdt', 'lgs', 'lidt', 'ljmp', 'lldt', 'lmsw', - 'loadall', 'loadall286', 'lock', 'lodsb', 'lodsd', 'lodsw', - 'loop', 'loope', 'loopne', 'loopnz', 'loopz', 'lsl', 'lss', 'ltr', - 'mov', 'movd', 'movq', 'movsb', 'movsd', 'movsw', 'movsx', - 'movzx', 'mul', 'neg', 'nop', 'not', 'or', 'out', 'outsb', 'outsd', - 'outsw', 'pop', 'popa', 'popad', 'popaw', 'popf', 'popfd', 'popfw', - 'push', 'pusha', 'pushad', 'pushaw', 'pushf', 'pushfd', 'pushfw', - 'rcl', 'rcr', 'rdmsr', 'rdpmc', 'rdshr', 'rdtsc', 'rep', 'repe', - 'repne', 'repnz', 'repz', 'ret', 'retf', 'retn', 'rol', 'ror', - 'rsdc', 'rsldt', 'rsm', 'sahf', 'sal', 'salc', 'sar', 'sbb', - 'scasb', 'scasd', 'scasw', 'seta', 'setae', 'setb', 'setbe', - 'setc', 'setcxz', 'sete', 'setg', 'setge', 'setl', 'setle', - 'setna', 'setnae', 'setnb', 'setnbe', 'setnc', 'setne', 'setng', - 'setnge', 'setnl', 'setnle', 'setno', 'setnp', 'setns', 'setnz', - 'seto', 'setp', 'setpe', 'setpo', 'sets', 'setz', 'sgdt', 'shl', - 'shld', 'shr', 'shrd', 'sidt', 'sldt', 'smi', 'smint', 'smintold', - 'smsw', 'stc', 'std', 'sti', 'stosb', 'stosd', 'stosw', 'str', - 'sub', 'svdc', 'svldt', 'svts', 'syscall', 'sysenter', 'sysexit', - 'sysret', 'test', 'ud1', 'ud2', 'umov', 'verr', 'verw', 'wait', - 'wbinvd', 'wrmsr', 'wrshr', 'xadd', 'xbts', 'xchg', 'xlat', - 'xlatb', 'xor' - } - - def __init__(self, **options): - Lexer.__init__(self, **options) - self.keywords = set() - if get_bool_opt(options, 'turbopascal', True): - self.keywords.update(self.TURBO_PASCAL_KEYWORDS) - if get_bool_opt(options, 'delphi', True): - self.keywords.update(self.DELPHI_KEYWORDS) - if get_bool_opt(options, 'freepascal', True): - self.keywords.update(self.FREE_PASCAL_KEYWORDS) - self.builtins = set() - for unit in get_list_opt(options, 'units', list(self.BUILTIN_UNITS)): - self.builtins.update(self.BUILTIN_UNITS[unit]) - - def get_tokens_unprocessed(self, text): - scanner = Scanner(text, re.DOTALL | re.MULTILINE | re.IGNORECASE) - stack = ['initial'] - in_function_block = False - in_property_block = False - was_dot = False - next_token_is_function = False - next_token_is_property = False - collect_labels = False - block_labels = set() - brace_balance = [0, 0] - - while not scanner.eos: - token = Error - - if stack[-1] == 'initial': - if scanner.scan(r'\s+'): - token = Text - elif scanner.scan(r'\{.*?\}|\(\*.*?\*\)'): - if scanner.match.startswith('$'): - token = Comment.Preproc - else: - token = Comment.Multiline - elif scanner.scan(r'//.*?$'): - token = Comment.Single - elif scanner.scan(r'[-+*\/=<>:;,.@\^]'): - token = Operator - # stop label highlighting on next ";" - if collect_labels and scanner.match == ';': - collect_labels = False - elif scanner.scan(r'[\(\)\[\]]+'): - token = Punctuation - # abort function naming ``foo = Function(...)`` - next_token_is_function = False - # if we are in a function block we count the open - # braces because ootherwise it's impossible to - # determine the end of the modifier context - if in_function_block or in_property_block: - if scanner.match == '(': - brace_balance[0] += 1 - elif scanner.match == ')': - brace_balance[0] -= 1 - elif scanner.match == '[': - brace_balance[1] += 1 - elif scanner.match == ']': - brace_balance[1] -= 1 - elif scanner.scan(r'[A-Za-z_][A-Za-z_0-9]*'): - lowercase_name = scanner.match.lower() - if lowercase_name == 'result': - token = 
Name.Builtin.Pseudo - elif lowercase_name in self.keywords: - token = Keyword - # if we are in a special block and a - # block ending keyword occours (and the parenthesis - # is balanced) we end the current block context - if (in_function_block or in_property_block) and \ - lowercase_name in self.BLOCK_KEYWORDS and \ - brace_balance[0] <= 0 and \ - brace_balance[1] <= 0: - in_function_block = False - in_property_block = False - brace_balance = [0, 0] - block_labels = set() - if lowercase_name in ('label', 'goto'): - collect_labels = True - elif lowercase_name == 'asm': - stack.append('asm') - elif lowercase_name == 'property': - in_property_block = True - next_token_is_property = True - elif lowercase_name in ('procedure', 'operator', - 'function', 'constructor', - 'destructor'): - in_function_block = True - next_token_is_function = True - # we are in a function block and the current name - # is in the set of registered modifiers. highlight - # it as pseudo keyword - elif in_function_block and \ - lowercase_name in self.FUNCTION_MODIFIERS: - token = Keyword.Pseudo - # if we are in a property highlight some more - # modifiers - elif in_property_block and \ - lowercase_name in ('read', 'write'): - token = Keyword.Pseudo - next_token_is_function = True - # if the last iteration set next_token_is_function - # to true we now want this name highlighted as - # function. so do that and reset the state - elif next_token_is_function: - # Look if the next token is a dot. If yes it's - # not a function, but a class name and the - # part after the dot a function name - if scanner.test(r'\s*\.\s*'): - token = Name.Class - # it's not a dot, our job is done - else: - token = Name.Function - next_token_is_function = False - # same for properties - elif next_token_is_property: - token = Name.Property - next_token_is_property = False - # Highlight this token as label and add it - # to the list of known labels - elif collect_labels: - token = Name.Label - block_labels.add(scanner.match.lower()) - # name is in list of known labels - elif lowercase_name in block_labels: - token = Name.Label - elif lowercase_name in self.BUILTIN_TYPES: - token = Keyword.Type - elif lowercase_name in self.DIRECTIVES: - token = Keyword.Pseudo - # builtins are just builtins if the token - # before isn't a dot - elif not was_dot and lowercase_name in self.builtins: - token = Name.Builtin - else: - token = Name - elif scanner.scan(r"'"): - token = String - stack.append('string') - elif scanner.scan(r'\#(\d+|\$[0-9A-Fa-f]+)'): - token = String.Char - elif scanner.scan(r'\$[0-9A-Fa-f]+'): - token = Number.Hex - elif scanner.scan(r'\d+(?![eE]|\.[^.])'): - token = Number.Integer - elif scanner.scan(r'\d+(\.\d+([eE][+-]?\d+)?|[eE][+-]?\d+)'): - token = Number.Float - else: - # if the stack depth is deeper than once, pop - if len(stack) > 1: - stack.pop() - scanner.get_char() - - elif stack[-1] == 'string': - if scanner.scan(r"''"): - token = String.Escape - elif scanner.scan(r"'"): - token = String - stack.pop() - elif scanner.scan(r"[^']*"): - token = String - else: - scanner.get_char() - stack.pop() - - elif stack[-1] == 'asm': - if scanner.scan(r'\s+'): - token = Text - elif scanner.scan(r'end'): - token = Keyword - stack.pop() - elif scanner.scan(r'\{.*?\}|\(\*.*?\*\)'): - if scanner.match.startswith('$'): - token = Comment.Preproc - else: - token = Comment.Multiline - elif scanner.scan(r'//.*?$'): - token = Comment.Single - elif scanner.scan(r"'"): - token = String - stack.append('string') - elif 
scanner.scan(r'@@[A-Za-z_][A-Za-z_0-9]*'): - token = Name.Label - elif scanner.scan(r'[A-Za-z_][A-Za-z_0-9]*'): - lowercase_name = scanner.match.lower() - if lowercase_name in self.ASM_INSTRUCTIONS: - token = Keyword - elif lowercase_name in self.ASM_REGISTERS: - token = Name.Builtin - else: - token = Name - elif scanner.scan(r'[-+*\/=<>:;,.@\^]+'): - token = Operator - elif scanner.scan(r'[\(\)\[\]]+'): - token = Punctuation - elif scanner.scan(r'\$[0-9A-Fa-f]+'): - token = Number.Hex - elif scanner.scan(r'\d+(?![eE]|\.[^.])'): - token = Number.Integer - elif scanner.scan(r'\d+(\.\d+([eE][+-]?\d+)?|[eE][+-]?\d+)'): - token = Number.Float - else: - scanner.get_char() - stack.pop() - - # save the dot!!!11 - if scanner.match.strip(): - was_dot = scanner.match == '.' - yield scanner.start_pos, token, scanner.match or '' - - -class AdaLexer(RegexLexer): - """ - For Ada source code. - - .. versionadded:: 1.3 - """ - - name = 'Ada' - aliases = ['ada', 'ada95', 'ada2005'] - filenames = ['*.adb', '*.ads', '*.ada'] - mimetypes = ['text/x-ada'] - - flags = re.MULTILINE | re.IGNORECASE - - tokens = { - 'root': [ - (r'[^\S\n]+', Text), - (r'--.*?\n', Comment.Single), - (r'[^\S\n]+', Text), - (r'function|procedure|entry', Keyword.Declaration, 'subprogram'), - (r'(subtype|type)(\s+)(\w+)', - bygroups(Keyword.Declaration, Text, Keyword.Type), 'type_def'), - (r'task|protected', Keyword.Declaration), - (r'(subtype)(\s+)', bygroups(Keyword.Declaration, Text)), - (r'(end)(\s+)', bygroups(Keyword.Reserved, Text), 'end'), - (r'(pragma)(\s+)(\w+)', bygroups(Keyword.Reserved, Text, - Comment.Preproc)), - (r'(true|false|null)\b', Keyword.Constant), - (words(( - 'Address', 'Byte', 'Boolean', 'Character', 'Controlled', 'Count', - 'Cursor', 'Duration', 'File_Mode', 'File_Type', 'Float', 'Generator', - 'Integer', 'Long_Float', 'Long_Integer', 'Long_Long_Float', - 'Long_Long_Integer', 'Natural', 'Positive', 'Reference_Type', - 'Short_Float', 'Short_Integer', 'Short_Short_Float', - 'Short_Short_Integer', 'String', 'Wide_Character', 'Wide_String'), - suffix=r'\b'), - Keyword.Type), - (r'(and(\s+then)?|in|mod|not|or(\s+else)|rem)\b', Operator.Word), - (r'generic|private', Keyword.Declaration), - (r'package', Keyword.Declaration, 'package'), - (r'array\b', Keyword.Reserved, 'array_def'), - (r'(with|use)(\s+)', bygroups(Keyword.Namespace, Text), 'import'), - (r'(\w+)(\s*)(:)(\s*)(constant)', - bygroups(Name.Constant, Text, Punctuation, Text, - Keyword.Reserved)), - (r'<<\w+>>', Name.Label), - (r'(\w+)(\s*)(:)(\s*)(declare|begin|loop|for|while)', - bygroups(Name.Label, Text, Punctuation, Text, Keyword.Reserved)), - (words(( - 'abort', 'abs', 'abstract', 'accept', 'access', 'aliased', 'all', - 'array', 'at', 'begin', 'body', 'case', 'constant', 'declare', - 'delay', 'delta', 'digits', 'do', 'else', 'elsif', 'end', 'entry', - 'exception', 'exit', 'interface', 'for', 'goto', 'if', 'is', 'limited', - 'loop', 'new', 'null', 'of', 'or', 'others', 'out', 'overriding', - 'pragma', 'protected', 'raise', 'range', 'record', 'renames', 'requeue', - 'return', 'reverse', 'select', 'separate', 'subtype', 'synchronized', - 'task', 'tagged', 'terminate', 'then', 'type', 'until', 'when', - 'while', 'xor'), prefix=r'\b', suffix=r'\b'), - Keyword.Reserved), - (r'"[^"]*"', String), - include('attribute'), - include('numbers'), - (r"'[^']'", String.Character), - (r'(\w+)(\s*|[(,])', bygroups(Name, using(this))), - (r"(<>|=>|:=|[()|:;,.'])", Punctuation), - (r'[*<>+=/&-]', Operator), - (r'\n+', Text), - ], - 'numbers': [ - 
(r'[0-9_]+#[0-9a-f_\.]+#', Number.Hex), - (r'[0-9_]+\.[0-9_]*', Number.Float), - (r'[0-9_]+', Number.Integer), - ], - 'attribute': [ - (r"(')(\w+)", bygroups(Punctuation, Name.Attribute)), - ], - 'subprogram': [ - (r'\(', Punctuation, ('#pop', 'formal_part')), - (r';', Punctuation, '#pop'), - (r'is\b', Keyword.Reserved, '#pop'), - (r'"[^"]+"|\w+', Name.Function), - include('root'), - ], - 'end': [ - ('(if|case|record|loop|select)', Keyword.Reserved), - (r'"[^"]+"|[\w.]+', Name.Function), - (r'\s+', Text), - (';', Punctuation, '#pop'), - ], - 'type_def': [ - (r';', Punctuation, '#pop'), - (r'\(', Punctuation, 'formal_part'), - (r'with|and|use', Keyword.Reserved), - (r'array\b', Keyword.Reserved, ('#pop', 'array_def')), - (r'record\b', Keyword.Reserved, ('record_def')), - (r'(null record)(;)', bygroups(Keyword.Reserved, Punctuation), '#pop'), - include('root'), - ], - 'array_def': [ - (r';', Punctuation, '#pop'), - (r'(\w+)(\s+)(range)', bygroups(Keyword.Type, Text, Keyword.Reserved)), - include('root'), - ], - 'record_def': [ - (r'end record', Keyword.Reserved, '#pop'), - include('root'), - ], - 'import': [ - (r'[\w.]+', Name.Namespace, '#pop'), - default('#pop'), - ], - 'formal_part': [ - (r'\)', Punctuation, '#pop'), - (r'\w+', Name.Variable), - (r',|:[^=]', Punctuation), - (r'(in|not|null|out|access)\b', Keyword.Reserved), - include('root'), - ], - 'package': [ - ('body', Keyword.Declaration), - (r'is\s+new|renames', Keyword.Reserved), - ('is', Keyword.Reserved, '#pop'), - (';', Punctuation, '#pop'), - (r'\(', Punctuation, 'package_instantiation'), - (r'([\w.]+)', Name.Class), - include('root'), - ], - 'package_instantiation': [ - (r'("[^"]+"|\w+)(\s+)(=>)', bygroups(Name.Variable, Text, Punctuation)), - (r'[\w.\'"]', Text), - (r'\)', Punctuation, '#pop'), - include('root'), - ], - } +# -*- coding: utf-8 -*- +""" + pygments.lexers.pascal + ~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for Pascal family languages. + + :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import Lexer, RegexLexer, include, bygroups, words, \ + using, this, default +from pygments.util import get_bool_opt, get_list_opt +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Error +from pygments.scanner import Scanner + +# compatibility import +from pygments.lexers.modula2 import Modula2Lexer + +__all__ = ['DelphiLexer', 'AdaLexer'] + + +class DelphiLexer(Lexer): + """ + For `Delphi `_ (Borland Object Pascal), + Turbo Pascal and Free Pascal source code. + + Additional options accepted: + + `turbopascal` + Highlight Turbo Pascal specific keywords (default: ``True``). + `delphi` + Highlight Borland Delphi specific keywords (default: ``True``). + `freepascal` + Highlight Free Pascal specific keywords (default: ``True``). + `units` + A list of units that should be considered builtin, supported are + ``System``, ``SysUtils``, ``Classes`` and ``Math``. + Default is to consider all of them builtin. 
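+
+    A minimal usage sketch for these options (the option names above are
+    the real ones; the sample program text and the chosen option values
+    are made up for illustration)::
+
+        from pygments import highlight
+        from pygments.formatters import TerminalFormatter
+        from pygments.lexers.pascal import DelphiLexer
+
+        # only System builtins, Free Pascal keywords disabled
+        lexer = DelphiLexer(units=['System'], freepascal=False)
+        print(highlight("program Hi; begin WriteLn('Hi') end.",
+                        lexer, TerminalFormatter()))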
+ """ + name = 'Delphi' + aliases = ['delphi', 'pas', 'pascal', 'objectpascal'] + filenames = ['*.pas', '*.dpr'] + mimetypes = ['text/x-pascal'] + + TURBO_PASCAL_KEYWORDS = ( + 'absolute', 'and', 'array', 'asm', 'begin', 'break', 'case', + 'const', 'constructor', 'continue', 'destructor', 'div', 'do', + 'downto', 'else', 'end', 'file', 'for', 'function', 'goto', + 'if', 'implementation', 'in', 'inherited', 'inline', 'interface', + 'label', 'mod', 'nil', 'not', 'object', 'of', 'on', 'operator', + 'or', 'packed', 'procedure', 'program', 'record', 'reintroduce', + 'repeat', 'self', 'set', 'shl', 'shr', 'string', 'then', 'to', + 'type', 'unit', 'until', 'uses', 'var', 'while', 'with', 'xor' + ) + + DELPHI_KEYWORDS = ( + 'as', 'class', 'except', 'exports', 'finalization', 'finally', + 'initialization', 'is', 'library', 'on', 'property', 'raise', + 'threadvar', 'try' + ) + + FREE_PASCAL_KEYWORDS = ( + 'dispose', 'exit', 'false', 'new', 'true' + ) + + BLOCK_KEYWORDS = { + 'begin', 'class', 'const', 'constructor', 'destructor', 'end', + 'finalization', 'function', 'implementation', 'initialization', + 'label', 'library', 'operator', 'procedure', 'program', 'property', + 'record', 'threadvar', 'type', 'unit', 'uses', 'var' + } + + FUNCTION_MODIFIERS = { + 'alias', 'cdecl', 'export', 'inline', 'interrupt', 'nostackframe', + 'pascal', 'register', 'safecall', 'softfloat', 'stdcall', + 'varargs', 'name', 'dynamic', 'near', 'virtual', 'external', + 'override', 'assembler' + } + + # XXX: those aren't global. but currently we know no way for defining + # them just for the type context. + DIRECTIVES = { + 'absolute', 'abstract', 'assembler', 'cppdecl', 'default', 'far', + 'far16', 'forward', 'index', 'oldfpccall', 'private', 'protected', + 'published', 'public' + } + + BUILTIN_TYPES = { + 'ansichar', 'ansistring', 'bool', 'boolean', 'byte', 'bytebool', + 'cardinal', 'char', 'comp', 'currency', 'double', 'dword', + 'extended', 'int64', 'integer', 'iunknown', 'longbool', 'longint', + 'longword', 'pansichar', 'pansistring', 'pbool', 'pboolean', + 'pbyte', 'pbytearray', 'pcardinal', 'pchar', 'pcomp', 'pcurrency', + 'pdate', 'pdatetime', 'pdouble', 'pdword', 'pextended', 'phandle', + 'pint64', 'pinteger', 'plongint', 'plongword', 'pointer', + 'ppointer', 'pshortint', 'pshortstring', 'psingle', 'psmallint', + 'pstring', 'pvariant', 'pwidechar', 'pwidestring', 'pword', + 'pwordarray', 'pwordbool', 'real', 'real48', 'shortint', + 'shortstring', 'single', 'smallint', 'string', 'tclass', 'tdate', + 'tdatetime', 'textfile', 'thandle', 'tobject', 'ttime', 'variant', + 'widechar', 'widestring', 'word', 'wordbool' + } + + BUILTIN_UNITS = { + 'System': ( + 'abs', 'acquireexceptionobject', 'addr', 'ansitoutf8', + 'append', 'arctan', 'assert', 'assigned', 'assignfile', + 'beginthread', 'blockread', 'blockwrite', 'break', 'chdir', + 'chr', 'close', 'closefile', 'comptocurrency', 'comptodouble', + 'concat', 'continue', 'copy', 'cos', 'dec', 'delete', + 'dispose', 'doubletocomp', 'endthread', 'enummodules', + 'enumresourcemodules', 'eof', 'eoln', 'erase', 'exceptaddr', + 'exceptobject', 'exclude', 'exit', 'exp', 'filepos', 'filesize', + 'fillchar', 'finalize', 'findclasshinstance', 'findhinstance', + 'findresourcehinstance', 'flush', 'frac', 'freemem', + 'get8087cw', 'getdir', 'getlasterror', 'getmem', + 'getmemorymanager', 'getmodulefilename', 'getvariantmanager', + 'halt', 'hi', 'high', 'inc', 'include', 'initialize', 'insert', + 'int', 'ioresult', 'ismemorymanagerset', 'isvariantmanagerset', + 'length', 'ln', 'lo', 'low', 
'mkdir', 'move', 'new', 'odd', + 'olestrtostring', 'olestrtostrvar', 'ord', 'paramcount', + 'paramstr', 'pi', 'pos', 'pred', 'ptr', 'pucs4chars', 'random', + 'randomize', 'read', 'readln', 'reallocmem', + 'releaseexceptionobject', 'rename', 'reset', 'rewrite', 'rmdir', + 'round', 'runerror', 'seek', 'seekeof', 'seekeoln', + 'set8087cw', 'setlength', 'setlinebreakstyle', + 'setmemorymanager', 'setstring', 'settextbuf', + 'setvariantmanager', 'sin', 'sizeof', 'slice', 'sqr', 'sqrt', + 'str', 'stringofchar', 'stringtoolestr', 'stringtowidechar', + 'succ', 'swap', 'trunc', 'truncate', 'typeinfo', + 'ucs4stringtowidestring', 'unicodetoutf8', 'uniquestring', + 'upcase', 'utf8decode', 'utf8encode', 'utf8toansi', + 'utf8tounicode', 'val', 'vararrayredim', 'varclear', + 'widecharlentostring', 'widecharlentostrvar', + 'widechartostring', 'widechartostrvar', + 'widestringtoucs4string', 'write', 'writeln' + ), + 'SysUtils': ( + 'abort', 'addexitproc', 'addterminateproc', 'adjustlinebreaks', + 'allocmem', 'ansicomparefilename', 'ansicomparestr', + 'ansicomparetext', 'ansidequotedstr', 'ansiextractquotedstr', + 'ansilastchar', 'ansilowercase', 'ansilowercasefilename', + 'ansipos', 'ansiquotedstr', 'ansisamestr', 'ansisametext', + 'ansistrcomp', 'ansistricomp', 'ansistrlastchar', 'ansistrlcomp', + 'ansistrlicomp', 'ansistrlower', 'ansistrpos', 'ansistrrscan', + 'ansistrscan', 'ansistrupper', 'ansiuppercase', + 'ansiuppercasefilename', 'appendstr', 'assignstr', 'beep', + 'booltostr', 'bytetocharindex', 'bytetocharlen', 'bytetype', + 'callterminateprocs', 'changefileext', 'charlength', + 'chartobyteindex', 'chartobytelen', 'comparemem', 'comparestr', + 'comparetext', 'createdir', 'createguid', 'currentyear', + 'currtostr', 'currtostrf', 'date', 'datetimetofiledate', + 'datetimetostr', 'datetimetostring', 'datetimetosystemtime', + 'datetimetotimestamp', 'datetostr', 'dayofweek', 'decodedate', + 'decodedatefully', 'decodetime', 'deletefile', 'directoryexists', + 'diskfree', 'disksize', 'disposestr', 'encodedate', 'encodetime', + 'exceptionerrormessage', 'excludetrailingbackslash', + 'excludetrailingpathdelimiter', 'expandfilename', + 'expandfilenamecase', 'expanduncfilename', 'extractfiledir', + 'extractfiledrive', 'extractfileext', 'extractfilename', + 'extractfilepath', 'extractrelativepath', 'extractshortpathname', + 'fileage', 'fileclose', 'filecreate', 'filedatetodatetime', + 'fileexists', 'filegetattr', 'filegetdate', 'fileisreadonly', + 'fileopen', 'fileread', 'filesearch', 'fileseek', 'filesetattr', + 'filesetdate', 'filesetreadonly', 'filewrite', 'finalizepackage', + 'findclose', 'findcmdlineswitch', 'findfirst', 'findnext', + 'floattocurr', 'floattodatetime', 'floattodecimal', 'floattostr', + 'floattostrf', 'floattotext', 'floattotextfmt', 'fmtloadstr', + 'fmtstr', 'forcedirectories', 'format', 'formatbuf', 'formatcurr', + 'formatdatetime', 'formatfloat', 'freeandnil', 'getcurrentdir', + 'getenvironmentvariable', 'getfileversion', 'getformatsettings', + 'getlocaleformatsettings', 'getmodulename', 'getpackagedescription', + 'getpackageinfo', 'gettime', 'guidtostring', 'incamonth', + 'includetrailingbackslash', 'includetrailingpathdelimiter', + 'incmonth', 'initializepackage', 'interlockeddecrement', + 'interlockedexchange', 'interlockedexchangeadd', + 'interlockedincrement', 'inttohex', 'inttostr', 'isdelimiter', + 'isequalguid', 'isleapyear', 'ispathdelimiter', 'isvalidident', + 'languages', 'lastdelimiter', 'loadpackage', 'loadstr', + 'lowercase', 'msecstotimestamp', 'newstr', 'nextcharindex', 
'now', + 'outofmemoryerror', 'quotedstr', 'raiselastoserror', + 'raiselastwin32error', 'removedir', 'renamefile', 'replacedate', + 'replacetime', 'safeloadlibrary', 'samefilename', 'sametext', + 'setcurrentdir', 'showexception', 'sleep', 'stralloc', 'strbufsize', + 'strbytetype', 'strcat', 'strcharlength', 'strcomp', 'strcopy', + 'strdispose', 'strecopy', 'strend', 'strfmt', 'stricomp', + 'stringreplace', 'stringtoguid', 'strlcat', 'strlcomp', 'strlcopy', + 'strlen', 'strlfmt', 'strlicomp', 'strlower', 'strmove', 'strnew', + 'strnextchar', 'strpas', 'strpcopy', 'strplcopy', 'strpos', + 'strrscan', 'strscan', 'strtobool', 'strtobooldef', 'strtocurr', + 'strtocurrdef', 'strtodate', 'strtodatedef', 'strtodatetime', + 'strtodatetimedef', 'strtofloat', 'strtofloatdef', 'strtoint', + 'strtoint64', 'strtoint64def', 'strtointdef', 'strtotime', + 'strtotimedef', 'strupper', 'supports', 'syserrormessage', + 'systemtimetodatetime', 'texttofloat', 'time', 'timestamptodatetime', + 'timestamptomsecs', 'timetostr', 'trim', 'trimleft', 'trimright', + 'tryencodedate', 'tryencodetime', 'tryfloattocurr', 'tryfloattodatetime', + 'trystrtobool', 'trystrtocurr', 'trystrtodate', 'trystrtodatetime', + 'trystrtofloat', 'trystrtoint', 'trystrtoint64', 'trystrtotime', + 'unloadpackage', 'uppercase', 'widecomparestr', 'widecomparetext', + 'widefmtstr', 'wideformat', 'wideformatbuf', 'widelowercase', + 'widesamestr', 'widesametext', 'wideuppercase', 'win32check', + 'wraptext' + ), + 'Classes': ( + 'activateclassgroup', 'allocatehwnd', 'bintohex', 'checksynchronize', + 'collectionsequal', 'countgenerations', 'deallocatehwnd', 'equalrect', + 'extractstrings', 'findclass', 'findglobalcomponent', 'getclass', + 'groupdescendantswith', 'hextobin', 'identtoint', + 'initinheritedcomponent', 'inttoident', 'invalidpoint', + 'isuniqueglobalcomponentname', 'linestart', 'objectbinarytotext', + 'objectresourcetotext', 'objecttexttobinary', 'objecttexttoresource', + 'pointsequal', 'readcomponentres', 'readcomponentresex', + 'readcomponentresfile', 'rect', 'registerclass', 'registerclassalias', + 'registerclasses', 'registercomponents', 'registerintegerconsts', + 'registernoicon', 'registernonactivex', 'smallpoint', 'startclassgroup', + 'teststreamformat', 'unregisterclass', 'unregisterclasses', + 'unregisterintegerconsts', 'unregistermoduleclasses', + 'writecomponentresfile' + ), + 'Math': ( + 'arccos', 'arccosh', 'arccot', 'arccoth', 'arccsc', 'arccsch', 'arcsec', + 'arcsech', 'arcsin', 'arcsinh', 'arctan2', 'arctanh', 'ceil', + 'comparevalue', 'cosecant', 'cosh', 'cot', 'cotan', 'coth', 'csc', + 'csch', 'cycletodeg', 'cycletograd', 'cycletorad', 'degtocycle', + 'degtograd', 'degtorad', 'divmod', 'doubledecliningbalance', + 'ensurerange', 'floor', 'frexp', 'futurevalue', 'getexceptionmask', + 'getprecisionmode', 'getroundmode', 'gradtocycle', 'gradtodeg', + 'gradtorad', 'hypot', 'inrange', 'interestpayment', 'interestrate', + 'internalrateofreturn', 'intpower', 'isinfinite', 'isnan', 'iszero', + 'ldexp', 'lnxp1', 'log10', 'log2', 'logn', 'max', 'maxintvalue', + 'maxvalue', 'mean', 'meanandstddev', 'min', 'minintvalue', 'minvalue', + 'momentskewkurtosis', 'netpresentvalue', 'norm', 'numberofperiods', + 'payment', 'periodpayment', 'poly', 'popnstddev', 'popnvariance', + 'power', 'presentvalue', 'radtocycle', 'radtodeg', 'radtograd', + 'randg', 'randomrange', 'roundto', 'samevalue', 'sec', 'secant', + 'sech', 'setexceptionmask', 'setprecisionmode', 'setroundmode', + 'sign', 'simpleroundto', 'sincos', 'sinh', 'slndepreciation', 
'stddev', + 'sum', 'sumint', 'sumofsquares', 'sumsandsquares', 'syddepreciation', + 'tan', 'tanh', 'totalvariance', 'variance' + ) + } + + ASM_REGISTERS = { + 'ah', 'al', 'ax', 'bh', 'bl', 'bp', 'bx', 'ch', 'cl', 'cr0', + 'cr1', 'cr2', 'cr3', 'cr4', 'cs', 'cx', 'dh', 'di', 'dl', 'dr0', + 'dr1', 'dr2', 'dr3', 'dr4', 'dr5', 'dr6', 'dr7', 'ds', 'dx', + 'eax', 'ebp', 'ebx', 'ecx', 'edi', 'edx', 'es', 'esi', 'esp', + 'fs', 'gs', 'mm0', 'mm1', 'mm2', 'mm3', 'mm4', 'mm5', 'mm6', + 'mm7', 'si', 'sp', 'ss', 'st0', 'st1', 'st2', 'st3', 'st4', 'st5', + 'st6', 'st7', 'xmm0', 'xmm1', 'xmm2', 'xmm3', 'xmm4', 'xmm5', + 'xmm6', 'xmm7' + } + + ASM_INSTRUCTIONS = { + 'aaa', 'aad', 'aam', 'aas', 'adc', 'add', 'and', 'arpl', 'bound', + 'bsf', 'bsr', 'bswap', 'bt', 'btc', 'btr', 'bts', 'call', 'cbw', + 'cdq', 'clc', 'cld', 'cli', 'clts', 'cmc', 'cmova', 'cmovae', + 'cmovb', 'cmovbe', 'cmovc', 'cmovcxz', 'cmove', 'cmovg', + 'cmovge', 'cmovl', 'cmovle', 'cmovna', 'cmovnae', 'cmovnb', + 'cmovnbe', 'cmovnc', 'cmovne', 'cmovng', 'cmovnge', 'cmovnl', + 'cmovnle', 'cmovno', 'cmovnp', 'cmovns', 'cmovnz', 'cmovo', + 'cmovp', 'cmovpe', 'cmovpo', 'cmovs', 'cmovz', 'cmp', 'cmpsb', + 'cmpsd', 'cmpsw', 'cmpxchg', 'cmpxchg486', 'cmpxchg8b', 'cpuid', + 'cwd', 'cwde', 'daa', 'das', 'dec', 'div', 'emms', 'enter', 'hlt', + 'ibts', 'icebp', 'idiv', 'imul', 'in', 'inc', 'insb', 'insd', + 'insw', 'int', 'int01', 'int03', 'int1', 'int3', 'into', 'invd', + 'invlpg', 'iret', 'iretd', 'iretw', 'ja', 'jae', 'jb', 'jbe', + 'jc', 'jcxz', 'jcxz', 'je', 'jecxz', 'jg', 'jge', 'jl', 'jle', + 'jmp', 'jna', 'jnae', 'jnb', 'jnbe', 'jnc', 'jne', 'jng', 'jnge', + 'jnl', 'jnle', 'jno', 'jnp', 'jns', 'jnz', 'jo', 'jp', 'jpe', + 'jpo', 'js', 'jz', 'lahf', 'lar', 'lcall', 'lds', 'lea', 'leave', + 'les', 'lfs', 'lgdt', 'lgs', 'lidt', 'ljmp', 'lldt', 'lmsw', + 'loadall', 'loadall286', 'lock', 'lodsb', 'lodsd', 'lodsw', + 'loop', 'loope', 'loopne', 'loopnz', 'loopz', 'lsl', 'lss', 'ltr', + 'mov', 'movd', 'movq', 'movsb', 'movsd', 'movsw', 'movsx', + 'movzx', 'mul', 'neg', 'nop', 'not', 'or', 'out', 'outsb', 'outsd', + 'outsw', 'pop', 'popa', 'popad', 'popaw', 'popf', 'popfd', 'popfw', + 'push', 'pusha', 'pushad', 'pushaw', 'pushf', 'pushfd', 'pushfw', + 'rcl', 'rcr', 'rdmsr', 'rdpmc', 'rdshr', 'rdtsc', 'rep', 'repe', + 'repne', 'repnz', 'repz', 'ret', 'retf', 'retn', 'rol', 'ror', + 'rsdc', 'rsldt', 'rsm', 'sahf', 'sal', 'salc', 'sar', 'sbb', + 'scasb', 'scasd', 'scasw', 'seta', 'setae', 'setb', 'setbe', + 'setc', 'setcxz', 'sete', 'setg', 'setge', 'setl', 'setle', + 'setna', 'setnae', 'setnb', 'setnbe', 'setnc', 'setne', 'setng', + 'setnge', 'setnl', 'setnle', 'setno', 'setnp', 'setns', 'setnz', + 'seto', 'setp', 'setpe', 'setpo', 'sets', 'setz', 'sgdt', 'shl', + 'shld', 'shr', 'shrd', 'sidt', 'sldt', 'smi', 'smint', 'smintold', + 'smsw', 'stc', 'std', 'sti', 'stosb', 'stosd', 'stosw', 'str', + 'sub', 'svdc', 'svldt', 'svts', 'syscall', 'sysenter', 'sysexit', + 'sysret', 'test', 'ud1', 'ud2', 'umov', 'verr', 'verw', 'wait', + 'wbinvd', 'wrmsr', 'wrshr', 'xadd', 'xbts', 'xchg', 'xlat', + 'xlatb', 'xor' + } + + def __init__(self, **options): + Lexer.__init__(self, **options) + self.keywords = set() + if get_bool_opt(options, 'turbopascal', True): + self.keywords.update(self.TURBO_PASCAL_KEYWORDS) + if get_bool_opt(options, 'delphi', True): + self.keywords.update(self.DELPHI_KEYWORDS) + if get_bool_opt(options, 'freepascal', True): + self.keywords.update(self.FREE_PASCAL_KEYWORDS) + self.builtins = set() + for unit in get_list_opt(options, 'units', 
list(self.BUILTIN_UNITS)):
+            self.builtins.update(self.BUILTIN_UNITS[unit])
+
+    def get_tokens_unprocessed(self, text):
+        scanner = Scanner(text, re.DOTALL | re.MULTILINE | re.IGNORECASE)
+        stack = ['initial']
+        in_function_block = False
+        in_property_block = False
+        was_dot = False
+        next_token_is_function = False
+        next_token_is_property = False
+        collect_labels = False
+        block_labels = set()
+        brace_balance = [0, 0]
+
+        while not scanner.eos:
+            token = Error
+
+            if stack[-1] == 'initial':
+                if scanner.scan(r'\s+'):
+                    token = Text
+                elif scanner.scan(r'\{.*?\}|\(\*.*?\*\)'):
+                    if scanner.match.startswith('$'):
+                        token = Comment.Preproc
+                    else:
+                        token = Comment.Multiline
+                elif scanner.scan(r'//.*?$'):
+                    token = Comment.Single
+                elif scanner.scan(r'[-+*\/=<>:;,.@\^]'):
+                    token = Operator
+                    # stop label highlighting on next ";"
+                    if collect_labels and scanner.match == ';':
+                        collect_labels = False
+                elif scanner.scan(r'[\(\)\[\]]+'):
+                    token = Punctuation
+                    # abort function naming ``foo = Function(...)``
+                    next_token_is_function = False
+                    # if we are in a function block we count the open
+                    # braces because otherwise it's impossible to
+                    # determine the end of the modifier context
+                    if in_function_block or in_property_block:
+                        if scanner.match == '(':
+                            brace_balance[0] += 1
+                        elif scanner.match == ')':
+                            brace_balance[0] -= 1
+                        elif scanner.match == '[':
+                            brace_balance[1] += 1
+                        elif scanner.match == ']':
+                            brace_balance[1] -= 1
+                elif scanner.scan(r'[A-Za-z_][A-Za-z_0-9]*'):
+                    lowercase_name = scanner.match.lower()
+                    if lowercase_name == 'result':
+                        token = Name.Builtin.Pseudo
+                    elif lowercase_name in self.keywords:
+                        token = Keyword
+                        # if we are in a special block and a
+                        # block ending keyword occurs (and the parentheses
+                        # are balanced) we end the current block context
+                        if (in_function_block or in_property_block) and \
+                           lowercase_name in self.BLOCK_KEYWORDS and \
+                           brace_balance[0] <= 0 and \
+                           brace_balance[1] <= 0:
+                            in_function_block = False
+                            in_property_block = False
+                            brace_balance = [0, 0]
+                            block_labels = set()
+                        if lowercase_name in ('label', 'goto'):
+                            collect_labels = True
+                        elif lowercase_name == 'asm':
+                            stack.append('asm')
+                        elif lowercase_name == 'property':
+                            in_property_block = True
+                            next_token_is_property = True
+                        elif lowercase_name in ('procedure', 'operator',
+                                                'function', 'constructor',
+                                                'destructor'):
+                            in_function_block = True
+                            next_token_is_function = True
+                    # we are in a function block and the current name
+                    # is in the set of registered modifiers. highlight
+                    # it as pseudo keyword
+                    elif in_function_block and \
+                            lowercase_name in self.FUNCTION_MODIFIERS:
+                        token = Keyword.Pseudo
+                    # if we are in a property highlight some more
+                    # modifiers
+                    elif in_property_block and \
+                            lowercase_name in ('read', 'write'):
+                        token = Keyword.Pseudo
+                        next_token_is_function = True
+                    # if the last iteration set next_token_is_function
+                    # to true we now want this name highlighted as
+                    # function. so do that and reset the state
+                    elif next_token_is_function:
+                        # Check whether the next token is a dot. If so, this
+                        # is not a function but a class name, and the part
+                        # after the dot is the function name
+                        if scanner.test(r'\s*\.\s*'):
+                            token = Name.Class
+                        # it's not a dot, our job is done
+                        else:
+                            token = Name.Function
+                        next_token_is_function = False
+                    # same for properties
+                    elif next_token_is_property:
+                        token = Name.Property
+                        next_token_is_property = False
+                    # Highlight this token as label and add it
+                    # to the list of known labels
+                    elif collect_labels:
+                        token = Name.Label
+                        block_labels.add(scanner.match.lower())
+                    # name is in list of known labels
+                    elif lowercase_name in block_labels:
+                        token = Name.Label
+                    elif lowercase_name in self.BUILTIN_TYPES:
+                        token = Keyword.Type
+                    elif lowercase_name in self.DIRECTIVES:
+                        token = Keyword.Pseudo
+                    # builtins are just builtins if the token
+                    # before isn't a dot
+                    elif not was_dot and lowercase_name in self.builtins:
+                        token = Name.Builtin
+                    else:
+                        token = Name
+                elif scanner.scan(r"'"):
+                    token = String
+                    stack.append('string')
+                elif scanner.scan(r'\#(\d+|\$[0-9A-Fa-f]+)'):
+                    token = String.Char
+                elif scanner.scan(r'\$[0-9A-Fa-f]+'):
+                    token = Number.Hex
+                elif scanner.scan(r'\d+(?![eE]|\.[^.])'):
+                    token = Number.Integer
+                elif scanner.scan(r'\d+(\.\d+([eE][+-]?\d+)?|[eE][+-]?\d+)'):
+                    token = Number.Float
+                else:
+                    # if the stack is more than one level deep, pop
+                    if len(stack) > 1:
+                        stack.pop()
+                    scanner.get_char()
+
+            elif stack[-1] == 'string':
+                if scanner.scan(r"''"):
+                    token = String.Escape
+                elif scanner.scan(r"'"):
+                    token = String
+                    stack.pop()
+                elif scanner.scan(r"[^']*"):
+                    token = String
+                else:
+                    scanner.get_char()
+                    stack.pop()
+
+            elif stack[-1] == 'asm':
+                if scanner.scan(r'\s+'):
+                    token = Text
+                elif scanner.scan(r'end'):
+                    token = Keyword
+                    stack.pop()
+                elif scanner.scan(r'\{.*?\}|\(\*.*?\*\)'):
+                    if scanner.match.startswith('$'):
+                        token = Comment.Preproc
+                    else:
+                        token = Comment.Multiline
+                elif scanner.scan(r'//.*?$'):
+                    token = Comment.Single
+                elif scanner.scan(r"'"):
+                    token = String
+                    stack.append('string')
+                elif scanner.scan(r'@@[A-Za-z_][A-Za-z_0-9]*'):
+                    token = Name.Label
+                elif scanner.scan(r'[A-Za-z_][A-Za-z_0-9]*'):
+                    lowercase_name = scanner.match.lower()
+                    if lowercase_name in self.ASM_INSTRUCTIONS:
+                        token = Keyword
+                    elif lowercase_name in self.ASM_REGISTERS:
+                        token = Name.Builtin
+                    else:
+                        token = Name
+                elif scanner.scan(r'[-+*\/=<>:;,.@\^]+'):
+                    token = Operator
+                elif scanner.scan(r'[\(\)\[\]]+'):
+                    token = Punctuation
+                elif scanner.scan(r'\$[0-9A-Fa-f]+'):
+                    token = Number.Hex
+                elif scanner.scan(r'\d+(?![eE]|\.[^.])'):
+                    token = Number.Integer
+                elif scanner.scan(r'\d+(\.\d+([eE][+-]?\d+)?|[eE][+-]?\d+)'):
+                    token = Number.Float
+                else:
+                    scanner.get_char()
+                    stack.pop()
+
+            # remember whether the last significant token was a dot
+            if scanner.match.strip():
+                was_dot = scanner.match == '.'
+            yield scanner.start_pos, token, scanner.match or ''
+
+
+class AdaLexer(RegexLexer):
+    """
+    For Ada source code.
+
+    .. 
versionadded:: 1.3 + """ + + name = 'Ada' + aliases = ['ada', 'ada95', 'ada2005'] + filenames = ['*.adb', '*.ads', '*.ada'] + mimetypes = ['text/x-ada'] + + flags = re.MULTILINE | re.IGNORECASE + + tokens = { + 'root': [ + (r'[^\S\n]+', Text), + (r'--.*?\n', Comment.Single), + (r'[^\S\n]+', Text), + (r'function|procedure|entry', Keyword.Declaration, 'subprogram'), + (r'(subtype|type)(\s+)(\w+)', + bygroups(Keyword.Declaration, Text, Keyword.Type), 'type_def'), + (r'task|protected', Keyword.Declaration), + (r'(subtype)(\s+)', bygroups(Keyword.Declaration, Text)), + (r'(end)(\s+)', bygroups(Keyword.Reserved, Text), 'end'), + (r'(pragma)(\s+)(\w+)', bygroups(Keyword.Reserved, Text, + Comment.Preproc)), + (r'(true|false|null)\b', Keyword.Constant), + (words(( + 'Address', 'Byte', 'Boolean', 'Character', 'Controlled', 'Count', + 'Cursor', 'Duration', 'File_Mode', 'File_Type', 'Float', 'Generator', + 'Integer', 'Long_Float', 'Long_Integer', 'Long_Long_Float', + 'Long_Long_Integer', 'Natural', 'Positive', 'Reference_Type', + 'Short_Float', 'Short_Integer', 'Short_Short_Float', + 'Short_Short_Integer', 'String', 'Wide_Character', 'Wide_String'), + suffix=r'\b'), + Keyword.Type), + (r'(and(\s+then)?|in|mod|not|or(\s+else)|rem)\b', Operator.Word), + (r'generic|private', Keyword.Declaration), + (r'package', Keyword.Declaration, 'package'), + (r'array\b', Keyword.Reserved, 'array_def'), + (r'(with|use)(\s+)', bygroups(Keyword.Namespace, Text), 'import'), + (r'(\w+)(\s*)(:)(\s*)(constant)', + bygroups(Name.Constant, Text, Punctuation, Text, + Keyword.Reserved)), + (r'<<\w+>>', Name.Label), + (r'(\w+)(\s*)(:)(\s*)(declare|begin|loop|for|while)', + bygroups(Name.Label, Text, Punctuation, Text, Keyword.Reserved)), + (words(( + 'abort', 'abs', 'abstract', 'accept', 'access', 'aliased', 'all', + 'array', 'at', 'begin', 'body', 'case', 'constant', 'declare', + 'delay', 'delta', 'digits', 'do', 'else', 'elsif', 'end', 'entry', + 'exception', 'exit', 'interface', 'for', 'goto', 'if', 'is', 'limited', + 'loop', 'new', 'null', 'of', 'or', 'others', 'out', 'overriding', + 'pragma', 'protected', 'raise', 'range', 'record', 'renames', 'requeue', + 'return', 'reverse', 'select', 'separate', 'subtype', 'synchronized', + 'task', 'tagged', 'terminate', 'then', 'type', 'until', 'when', + 'while', 'xor'), prefix=r'\b', suffix=r'\b'), + Keyword.Reserved), + (r'"[^"]*"', String), + include('attribute'), + include('numbers'), + (r"'[^']'", String.Character), + (r'(\w+)(\s*|[(,])', bygroups(Name, using(this))), + (r"(<>|=>|:=|[()|:;,.'])", Punctuation), + (r'[*<>+=/&-]', Operator), + (r'\n+', Text), + ], + 'numbers': [ + (r'[0-9_]+#[0-9a-f_\.]+#', Number.Hex), + (r'[0-9_]+\.[0-9_]*', Number.Float), + (r'[0-9_]+', Number.Integer), + ], + 'attribute': [ + (r"(')(\w+)", bygroups(Punctuation, Name.Attribute)), + ], + 'subprogram': [ + (r'\(', Punctuation, ('#pop', 'formal_part')), + (r';', Punctuation, '#pop'), + (r'is\b', Keyword.Reserved, '#pop'), + (r'"[^"]+"|\w+', Name.Function), + include('root'), + ], + 'end': [ + ('(if|case|record|loop|select)', Keyword.Reserved), + (r'"[^"]+"|[\w.]+', Name.Function), + (r'\s+', Text), + (';', Punctuation, '#pop'), + ], + 'type_def': [ + (r';', Punctuation, '#pop'), + (r'\(', Punctuation, 'formal_part'), + (r'with|and|use', Keyword.Reserved), + (r'array\b', Keyword.Reserved, ('#pop', 'array_def')), + (r'record\b', Keyword.Reserved, ('record_def')), + (r'(null record)(;)', bygroups(Keyword.Reserved, Punctuation), '#pop'), + include('root'), + ], + 'array_def': [ + (r';', Punctuation, 
'#pop'), + (r'(\w+)(\s+)(range)', bygroups(Keyword.Type, Text, Keyword.Reserved)), + include('root'), + ], + 'record_def': [ + (r'end record', Keyword.Reserved, '#pop'), + include('root'), + ], + 'import': [ + (r'[\w.]+', Name.Namespace, '#pop'), + default('#pop'), + ], + 'formal_part': [ + (r'\)', Punctuation, '#pop'), + (r'\w+', Name.Variable), + (r',|:[^=]', Punctuation), + (r'(in|not|null|out|access)\b', Keyword.Reserved), + include('root'), + ], + 'package': [ + ('body', Keyword.Declaration), + (r'is\s+new|renames', Keyword.Reserved), + ('is', Keyword.Reserved, '#pop'), + (';', Punctuation, '#pop'), + (r'\(', Punctuation, 'package_instantiation'), + (r'([\w.]+)', Name.Class), + include('root'), + ], + 'package_instantiation': [ + (r'("[^"]+"|\w+)(\s+)(=>)', bygroups(Name.Variable, Text, Punctuation)), + (r'[\w.\'"]', Text), + (r'\)', Punctuation, '#pop'), + include('root'), + ], + } diff --git a/pygments/lexers/pawn.py b/pygments/lexers/pawn.py old mode 100644 new mode 100755 index 3cdfbd0..164ea64 --- a/pygments/lexers/pawn.py +++ b/pygments/lexers/pawn.py @@ -1,199 +1,199 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.pawn - ~~~~~~~~~~~~~~~~~~~~ - - Lexers for the Pawn languages. - - :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -from pygments.lexer import RegexLexer -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ - Number, Punctuation, Error -from pygments.util import get_bool_opt - -__all__ = ['SourcePawnLexer', 'PawnLexer'] - - -class SourcePawnLexer(RegexLexer): - """ - For SourcePawn source code with preprocessor directives. - - .. versionadded:: 1.6 - """ - name = 'SourcePawn' - aliases = ['sp'] - filenames = ['*.sp'] - mimetypes = ['text/x-sourcepawn'] - - #: optional Comment or Whitespace - _ws = r'(?:\s|//.*?\n|/\*.*?\*/)+' - #: only one /* */ style comment - _ws1 = r'\s*(?:/[*].*?[*]/\s*)*' - - tokens = { - 'root': [ - # preprocessor directives: without whitespace - (r'^#if\s+0', Comment.Preproc, 'if0'), - ('^#', Comment.Preproc, 'macro'), - # or with whitespace - ('^' + _ws1 + r'#if\s+0', Comment.Preproc, 'if0'), - ('^' + _ws1 + '#', Comment.Preproc, 'macro'), - (r'\n', Text), - (r'\s+', Text), - (r'\\\n', Text), # line continuation - (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single), - (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline), - (r'[{}]', Punctuation), - (r'L?"', String, 'string'), - (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char), - (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float), - (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), - (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex), - (r'0[0-7]+[LlUu]*', Number.Oct), - (r'\d+[LlUu]*', Number.Integer), - (r'\*/', Error), - (r'[~!%^&*+=|?:<>/-]', Operator), - (r'[()\[\],.;]', Punctuation), - (r'(case|const|continue|native|' - r'default|else|enum|for|if|new|operator|' - r'public|return|sizeof|static|decl|struct|switch)\b', Keyword), - (r'(bool|Float)\b', Keyword.Type), - (r'(true|false)\b', Keyword.Constant), - (r'[a-zA-Z_]\w*', Name), - ], - 'string': [ - (r'"', String, '#pop'), - (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), - (r'[^\\"\n]+', String), # all other characters - (r'\\\n', String), # line continuation - (r'\\', String), # stray backslash - ], - 'macro': [ - (r'[^/\n]+', Comment.Preproc), - (r'/\*(.|\n)*?\*/', Comment.Multiline), - (r'//.*?\n', Comment.Single, '#pop'), - (r'/', Comment.Preproc), - (r'(?<=\\)\n', Comment.Preproc), - (r'\n', 
Comment.Preproc, '#pop'),
-        ],
-        'if0': [
-            (r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, 'if0'),
-            (r'^\s*#el(?:se|if).*\n', Comment.Preproc, '#pop'),
-            (r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
-            (r'.*?\n', Comment),
-        ]
-    }
-
-    SM_TYPES = {'Action', 'bool', 'Float', 'Plugin', 'String', 'any',
-                'AdminFlag', 'OverrideType', 'OverrideRule', 'ImmunityType',
-                'ConVarBounds', 'QueryCookie', 'ReplySource',
-                'ConVarQueryResult', 'ConVarQueryFinished', 'Function',
-                'Action', 'Identity', 'PluginStatus', 'PluginInfo', 'DBResult',
-                'DBBindType', 'DBPriority', 'PropType', 'PropFieldType',
-                'MoveType', 'RenderMode', 'RenderFx', 'EventHookMode',
-                'EventHook', 'FileType', 'FileTimeMode', 'PathType',
-                'ParamType', 'ExecType', 'DialogType', 'Handle', 'KvDataTypes',
-                'NominateResult', 'MapChange', 'MenuStyle', 'MenuAction',
-                'MenuSource', 'RegexError', 'SDKCallType', 'SDKLibrary',
-                'SDKFuncConfSource', 'SDKType', 'SDKPassMethod', 'RayType',
-                'TraceEntityFilter', 'ListenOverride', 'SortOrder', 'SortType',
-                'SortFunc2D', 'APLRes', 'FeatureType', 'FeatureStatus',
-                'SMCResult', 'SMCError', 'TFClassType', 'TFTeam', 'TFCond',
-                'TFResourceType', 'Timer', 'TopMenuAction', 'TopMenuObjectType',
-                'TopMenuPosition', 'TopMenuObject', 'UserMsg'}
-
-    def __init__(self, **options):
-        self.smhighlighting = get_bool_opt(options, 'sourcemod', True)
-
-        self._functions = set()
-        if self.smhighlighting:
-            from pygments.lexers._sourcemod_builtins import FUNCTIONS
-            self._functions.update(FUNCTIONS)
-        RegexLexer.__init__(self, **options)
-
-    def get_tokens_unprocessed(self, text):
-        for index, token, value in \
-                RegexLexer.get_tokens_unprocessed(self, text):
-            if token is Name:
-                if self.smhighlighting:
-                    if value in self.SM_TYPES:
-                        token = Keyword.Type
-                    elif value in self._functions:
-                        token = Name.Builtin
-            yield index, token, value
-
-
-class PawnLexer(RegexLexer):
-    """
-    For Pawn source code.
-
-    .. versionadded:: 2.0
-    """
-
-    name = 'Pawn'
-    aliases = ['pawn']
-    filenames = ['*.p', '*.pwn', '*.inc']
-    mimetypes = ['text/x-pawn']
-
-    _ws = r'(?:\s|//.*?\n|/\*.*?\*/)+'
-    _ws1 = r'\s*(?:/[*].*?[*]/\s*)*'
-
-    tokens = {
-        'root': [
-            # preprocessor directives: without whitespace
-            (r'^#if\s+0', Comment.Preproc, 'if0'),
-            ('^#', Comment.Preproc, 'macro'),
-            # or with whitespace
-            ('^' + _ws1 + r'#if\s+0', Comment.Preproc, 'if0'),
-            ('^' + _ws1 + '#', Comment.Preproc, 'macro'),
-            (r'\n', Text),
-            (r'\s+', Text),
-            (r'\\\n', Text),  # line continuation
-            (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
-            (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline),
-            (r'[{}]', Punctuation),
-            (r'L?"', String, 'string'),
-            (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
-            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
-            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
-            (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
-            (r'0[0-7]+[LlUu]*', Number.Oct),
-            (r'\d+[LlUu]*', Number.Integer),
-            (r'\*/', Error),
-            (r'[~!%^&*+=|?:<>/-]', Operator),
-            (r'[()\[\],.;]', Punctuation),
-            (r'(switch|case|default|const|new|static|char|continue|break|'
-             r'if|else|for|while|do|operator|enum|'
-             r'public|return|sizeof|tagof|state|goto)\b', Keyword),
-            (r'(bool|Float)\b', Keyword.Type),
-            (r'(true|false)\b', Keyword.Constant),
-            (r'[a-zA-Z_]\w*', Name),
-        ],
-        'string': [
-            (r'"', String, '#pop'),
-            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
-            (r'[^\\"\n]+', String),  # all other characters
-            (r'\\\n', String),  # line continuation
-            (r'\\', String),  # stray backslash
-        ],
-        'macro': [
-            (r'[^/\n]+', Comment.Preproc),
-            (r'/\*(.|\n)*?\*/', Comment.Multiline),
-            (r'//.*?\n', Comment.Single, '#pop'),
-            (r'/', Comment.Preproc),
-            (r'(?<=\\)\n', Comment.Preproc),
-            (r'\n', Comment.Preproc, '#pop'),
-        ],
-        'if0': [
-            (r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, 'if0'),
-            (r'^\s*#el(?:se|if).*\n', Comment.Preproc, '#pop'),
-            (r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
-            (r'.*?\n', Comment),
-        ]
-    }
+# -*- coding: utf-8 -*-
+"""
+    pygments.lexers.pawn
+    ~~~~~~~~~~~~~~~~~~~~
+
+    Lexers for the Pawn languages.
+
+    :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+    Number, Punctuation, Error
+from pygments.util import get_bool_opt
+
+__all__ = ['SourcePawnLexer', 'PawnLexer']
+
+
+class SourcePawnLexer(RegexLexer):
+    """
+    For SourcePawn source code with preprocessor directives.
+
+    .. versionadded:: 1.6
+    """
+    name = 'SourcePawn'
+    aliases = ['sp']
+    filenames = ['*.sp']
+    mimetypes = ['text/x-sourcepawn']
+
+    #: optional Comment or Whitespace
+    _ws = r'(?:\s|//.*?\n|/\*.*?\*/)+'
+    #: only one /* */ style comment
+    _ws1 = r'\s*(?:/[*].*?[*]/\s*)*'
+
+    tokens = {
+        'root': [
+            # preprocessor directives: without whitespace
+            (r'^#if\s+0', Comment.Preproc, 'if0'),
+            ('^#', Comment.Preproc, 'macro'),
+            # or with whitespace
+            ('^' + _ws1 + r'#if\s+0', Comment.Preproc, 'if0'),
+            ('^' + _ws1 + '#', Comment.Preproc, 'macro'),
+            (r'\n', Text),
+            (r'\s+', Text),
+            (r'\\\n', Text),  # line continuation
+            (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
+            (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline),
+            (r'[{}]', Punctuation),
+            (r'L?"', String, 'string'),
+            (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
+            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
+            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
+            (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
+            (r'0[0-7]+[LlUu]*', Number.Oct),
+            (r'\d+[LlUu]*', Number.Integer),
+            (r'\*/', Error),
+            (r'[~!%^&*+=|?:<>/-]', Operator),
+            (r'[()\[\],.;]', Punctuation),
+            (r'(case|const|continue|native|'
+             r'default|else|enum|for|if|new|operator|'
+             r'public|return|sizeof|static|decl|struct|switch)\b', Keyword),
+            (r'(bool|Float)\b', Keyword.Type),
+            (r'(true|false)\b', Keyword.Constant),
+            (r'[a-zA-Z_]\w*', Name),
+        ],
+        'string': [
+            (r'"', String, '#pop'),
+            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
+            (r'[^\\"\n]+', String),  # all other characters
+            (r'\\\n', String),  # line continuation
+            (r'\\', String),  # stray backslash
+        ],
+        'macro': [
+            (r'[^/\n]+', Comment.Preproc),
+            (r'/\*(.|\n)*?\*/', Comment.Multiline),
+            (r'//.*?\n', Comment.Single, '#pop'),
+            (r'/', Comment.Preproc),
+            (r'(?<=\\)\n', Comment.Preproc),
+            (r'\n', Comment.Preproc, '#pop'),
+        ],
+        'if0': [
+            (r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, 'if0'),
+            (r'^\s*#el(?:se|if).*\n', Comment.Preproc, '#pop'),
+            (r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
+            (r'.*?\n', Comment),
+        ]
+    }
+
+    SM_TYPES = {'Action', 'bool', 'Float', 'Plugin', 'String', 'any',
+                'AdminFlag', 'OverrideType', 'OverrideRule', 'ImmunityType',
+                'ConVarBounds', 'QueryCookie', 'ReplySource',
+                'ConVarQueryResult', 'ConVarQueryFinished', 'Function',
+                'Action', 'Identity', 'PluginStatus', 'PluginInfo', 'DBResult',
+                'DBBindType', 'DBPriority', 'PropType', 'PropFieldType',
+                'MoveType', 'RenderMode', 'RenderFx', 'EventHookMode',
+                'EventHook', 'FileType', 'FileTimeMode', 'PathType',
+                'ParamType', 'ExecType', 'DialogType', 'Handle', 'KvDataTypes',
+                'NominateResult', 'MapChange', 'MenuStyle', 'MenuAction',
+                'MenuSource', 'RegexError', 'SDKCallType', 'SDKLibrary',
+                'SDKFuncConfSource', 'SDKType', 'SDKPassMethod', 'RayType',
+                'TraceEntityFilter', 'ListenOverride', 'SortOrder', 'SortType',
+                'SortFunc2D', 'APLRes', 'FeatureType', 'FeatureStatus',
+                'SMCResult', 'SMCError', 'TFClassType', 'TFTeam', 'TFCond',
+                'TFResourceType', 'Timer', 'TopMenuAction', 'TopMenuObjectType',
+                'TopMenuPosition', 'TopMenuObject', 'UserMsg'}
+
+    def __init__(self, **options):
+        self.smhighlighting = get_bool_opt(options, 'sourcemod', True)
+
+        self._functions = set()
+        if self.smhighlighting:
+            from pygments.lexers._sourcemod_builtins import FUNCTIONS
+            self._functions.update(FUNCTIONS)
+        RegexLexer.__init__(self, **options)
+
+    def get_tokens_unprocessed(self, text):
+        for index, token, value in \
+                RegexLexer.get_tokens_unprocessed(self, text):
+            if token is Name:
+                if self.smhighlighting:
+                    if value in self.SM_TYPES:
+                        token = Keyword.Type
+                    elif value in self._functions:
+                        token = Name.Builtin
+            yield index, token, value
+
+
+class PawnLexer(RegexLexer):
+    """
+    For Pawn source code.
+
+    .. versionadded:: 2.0
+    """
+
+    name = 'Pawn'
+    aliases = ['pawn']
+    filenames = ['*.p', '*.pwn', '*.inc']
+    mimetypes = ['text/x-pawn']
+
+    _ws = r'(?:\s|//.*?\n|/\*.*?\*/)+'
+    _ws1 = r'\s*(?:/[*].*?[*]/\s*)*'
+
+    tokens = {
+        'root': [
+            # preprocessor directives: without whitespace
+            (r'^#if\s+0', Comment.Preproc, 'if0'),
+            ('^#', Comment.Preproc, 'macro'),
+            # or with whitespace
+            ('^' + _ws1 + r'#if\s+0', Comment.Preproc, 'if0'),
+            ('^' + _ws1 + '#', Comment.Preproc, 'macro'),
+            (r'\n', Text),
+            (r'\s+', Text),
+            (r'\\\n', Text),  # line continuation
+            (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
+            (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline),
+            (r'[{}]', Punctuation),
+            (r'L?"', String, 'string'),
+            (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
+            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
+            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
+            (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
+            (r'0[0-7]+[LlUu]*', Number.Oct),
+            (r'\d+[LlUu]*', Number.Integer),
+            (r'\*/', Error),
+            (r'[~!%^&*+=|?:<>/-]', Operator),
+            (r'[()\[\],.;]', Punctuation),
+            (r'(switch|case|default|const|new|static|char|continue|break|'
+             r'if|else|for|while|do|operator|enum|'
+             r'public|return|sizeof|tagof|state|goto)\b', Keyword),
+            (r'(bool|Float)\b', Keyword.Type),
+            (r'(true|false)\b', Keyword.Constant),
+            (r'[a-zA-Z_]\w*', Name),
+        ],
+        'string': [
+            (r'"', String, '#pop'),
+            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
+            (r'[^\\"\n]+', String),  # all other characters
+            (r'\\\n', String),  # line continuation
+            (r'\\', String),  # stray backslash
+        ],
+        'macro': [
+            (r'[^/\n]+', Comment.Preproc),
+            (r'/\*(.|\n)*?\*/', Comment.Multiline),
+            (r'//.*?\n', Comment.Single, '#pop'),
+            (r'/', Comment.Preproc),
+            (r'(?<=\\)\n', Comment.Preproc),
+            (r'\n', Comment.Preproc, '#pop'),
+        ],
+        'if0': [
+            (r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, 'if0'),
+            (r'^\s*#el(?:se|if).*\n', Comment.Preproc, '#pop'),
+            (r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
+            (r'.*?\n', Comment),
+        ]
+    }
diff --git a/pygments/lexers/perl.py b/pygments/lexers/perl.py
old mode 100644
new mode 100755
--- a/pygments/lexers/perl.py
+++ b/pygments/lexers/perl.py
-# -*- coding: utf-8 -*-
-"""
-    pygments.lexers.perl
-    ~~~~~~~~~~~~~~~~~~~~
-
-    Lexers for Perl, Raku and related languages.
-
-    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
-    :license: BSD, see LICENSE for details.
-"""
-
-import re
-
-from pygments.lexer import RegexLexer, ExtendedRegexLexer, include, bygroups, \
-    using, this, default, words
-from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
-    Number, Punctuation
-from pygments.util import shebang_matches
-
-__all__ = ['PerlLexer', 'Perl6Lexer']
-
-
-class PerlLexer(RegexLexer):
-    """
-    For `Perl <https://www.perl.org>`_ source code.
-    """
-
-    name = 'Perl'
-    aliases = ['perl', 'pl']
-    filenames = ['*.pl', '*.pm', '*.t']
-    mimetypes = ['text/x-perl', 'application/x-perl']
-
-    flags = re.DOTALL | re.MULTILINE
-    # TODO: give this to a perl guy who knows how to parse perl...
- tokens = { - 'balanced-regex': [ - (r'/(\\\\|\\[^\\]|[^\\/])*/[egimosx]*', String.Regex, '#pop'), - (r'!(\\\\|\\[^\\]|[^\\!])*![egimosx]*', String.Regex, '#pop'), - (r'\\(\\\\|[^\\])*\\[egimosx]*', String.Regex, '#pop'), - (r'\{(\\\\|\\[^\\]|[^\\}])*\}[egimosx]*', String.Regex, '#pop'), - (r'<(\\\\|\\[^\\]|[^\\>])*>[egimosx]*', String.Regex, '#pop'), - (r'\[(\\\\|\\[^\\]|[^\\\]])*\][egimosx]*', String.Regex, '#pop'), - (r'\((\\\\|\\[^\\]|[^\\)])*\)[egimosx]*', String.Regex, '#pop'), - (r'@(\\\\|\\[^\\]|[^\\@])*@[egimosx]*', String.Regex, '#pop'), - (r'%(\\\\|\\[^\\]|[^\\%])*%[egimosx]*', String.Regex, '#pop'), - (r'\$(\\\\|\\[^\\]|[^\\$])*\$[egimosx]*', String.Regex, '#pop'), - ], - 'root': [ - (r'\A\#!.+?$', Comment.Hashbang), - (r'\#.*?$', Comment.Single), - (r'^=[a-zA-Z0-9]+\s+.*?\n=cut', Comment.Multiline), - (words(( - 'case', 'continue', 'do', 'else', 'elsif', 'for', 'foreach', - 'if', 'last', 'my', 'next', 'our', 'redo', 'reset', 'then', - 'unless', 'until', 'while', 'print', 'new', 'BEGIN', - 'CHECK', 'INIT', 'END', 'return'), suffix=r'\b'), - Keyword), - (r'(format)(\s+)(\w+)(\s*)(=)(\s*\n)', - bygroups(Keyword, Text, Name, Text, Punctuation, Text), 'format'), - (r'(eq|lt|gt|le|ge|ne|not|and|or|cmp)\b', Operator.Word), - # common delimiters - (r's/(\\\\|\\[^\\]|[^\\/])*/(\\\\|\\[^\\]|[^\\/])*/[egimosx]*', - String.Regex), - (r's!(\\\\|\\!|[^!])*!(\\\\|\\!|[^!])*![egimosx]*', String.Regex), - (r's\\(\\\\|[^\\])*\\(\\\\|[^\\])*\\[egimosx]*', String.Regex), - (r's@(\\\\|\\[^\\]|[^\\@])*@(\\\\|\\[^\\]|[^\\@])*@[egimosx]*', - String.Regex), - (r's%(\\\\|\\[^\\]|[^\\%])*%(\\\\|\\[^\\]|[^\\%])*%[egimosx]*', - String.Regex), - # balanced delimiters - (r's\{(\\\\|\\[^\\]|[^\\}])*\}\s*', String.Regex, 'balanced-regex'), - (r's<(\\\\|\\[^\\]|[^\\>])*>\s*', String.Regex, 'balanced-regex'), - (r's\[(\\\\|\\[^\\]|[^\\\]])*\]\s*', String.Regex, - 'balanced-regex'), - (r's\((\\\\|\\[^\\]|[^\\)])*\)\s*', String.Regex, - 'balanced-regex'), - - (r'm?/(\\\\|\\[^\\]|[^\\/\n])*/[gcimosx]*', String.Regex), - (r'm(?=[/!\\{<\[(@%$])', String.Regex, 'balanced-regex'), - (r'((?<==~)|(?<=\())\s*/(\\\\|\\[^\\]|[^\\/])*/[gcimosx]*', - String.Regex), - (r'\s+', Text), - (words(( - 'abs', 'accept', 'alarm', 'atan2', 'bind', 'binmode', 'bless', 'caller', 'chdir', - 'chmod', 'chomp', 'chop', 'chown', 'chr', 'chroot', 'close', 'closedir', 'connect', - 'continue', 'cos', 'crypt', 'dbmclose', 'dbmopen', 'defined', 'delete', 'die', - 'dump', 'each', 'endgrent', 'endhostent', 'endnetent', 'endprotoent', - 'endpwent', 'endservent', 'eof', 'eval', 'exec', 'exists', 'exit', 'exp', 'fcntl', - 'fileno', 'flock', 'fork', 'format', 'formline', 'getc', 'getgrent', 'getgrgid', - 'getgrnam', 'gethostbyaddr', 'gethostbyname', 'gethostent', 'getlogin', - 'getnetbyaddr', 'getnetbyname', 'getnetent', 'getpeername', 'getpgrp', - 'getppid', 'getpriority', 'getprotobyname', 'getprotobynumber', - 'getprotoent', 'getpwent', 'getpwnam', 'getpwuid', 'getservbyname', - 'getservbyport', 'getservent', 'getsockname', 'getsockopt', 'glob', 'gmtime', - 'goto', 'grep', 'hex', 'import', 'index', 'int', 'ioctl', 'join', 'keys', 'kill', 'last', - 'lc', 'lcfirst', 'length', 'link', 'listen', 'local', 'localtime', 'log', 'lstat', - 'map', 'mkdir', 'msgctl', 'msgget', 'msgrcv', 'msgsnd', 'my', 'next', 'oct', 'open', - 'opendir', 'ord', 'our', 'pack', 'pipe', 'pop', 'pos', 'printf', - 'prototype', 'push', 'quotemeta', 'rand', 'read', 'readdir', - 'readline', 'readlink', 'readpipe', 'recv', 'redo', 'ref', 'rename', - 'reverse', 'rewinddir', 'rindex', 
'rmdir', 'scalar', 'seek', 'seekdir', - 'select', 'semctl', 'semget', 'semop', 'send', 'setgrent', 'sethostent', 'setnetent', - 'setpgrp', 'setpriority', 'setprotoent', 'setpwent', 'setservent', - 'setsockopt', 'shift', 'shmctl', 'shmget', 'shmread', 'shmwrite', 'shutdown', - 'sin', 'sleep', 'socket', 'socketpair', 'sort', 'splice', 'split', 'sprintf', 'sqrt', - 'srand', 'stat', 'study', 'substr', 'symlink', 'syscall', 'sysopen', 'sysread', - 'sysseek', 'system', 'syswrite', 'tell', 'telldir', 'tie', 'tied', 'time', 'times', 'tr', - 'truncate', 'uc', 'ucfirst', 'umask', 'undef', 'unlink', 'unpack', 'unshift', 'untie', - 'utime', 'values', 'vec', 'wait', 'waitpid', 'wantarray', 'warn', 'write'), suffix=r'\b'), - Name.Builtin), - (r'((__(DATA|DIE|WARN)__)|(STD(IN|OUT|ERR)))\b', Name.Builtin.Pseudo), - (r'(<<)([\'"]?)([a-zA-Z_]\w*)(\2;?\n.*?\n)(\3)(\n)', - bygroups(String, String, String.Delimiter, String, String.Delimiter, Text)), - (r'__END__', Comment.Preproc, 'end-part'), - (r'\$\^[ADEFHILMOPSTWX]', Name.Variable.Global), - (r"\$[\\\"\[\]'&`+*.,;=%~?@$!<>(^|/-](?!\w)", Name.Variable.Global), - (r'[$@%#]+', Name.Variable, 'varname'), - (r'0_?[0-7]+(_[0-7]+)*', Number.Oct), - (r'0x[0-9A-Fa-f]+(_[0-9A-Fa-f]+)*', Number.Hex), - (r'0b[01]+(_[01]+)*', Number.Bin), - (r'(?i)(\d*(_\d*)*\.\d+(_\d*)*|\d+(_\d*)*\.\d+(_\d*)*)(e[+-]?\d+)?', - Number.Float), - (r'(?i)\d+(_\d*)*e[+-]?\d+(_\d*)*', Number.Float), - (r'\d+(_\d+)*', Number.Integer), - (r"'(\\\\|\\[^\\]|[^'\\])*'", String), - (r'"(\\\\|\\[^\\]|[^"\\])*"', String), - (r'`(\\\\|\\[^\\]|[^`\\])*`', String.Backtick), - (r'<([^\s>]+)>', String.Regex), - (r'(q|qq|qw|qr|qx)\{', String.Other, 'cb-string'), - (r'(q|qq|qw|qr|qx)\(', String.Other, 'rb-string'), - (r'(q|qq|qw|qr|qx)\[', String.Other, 'sb-string'), - (r'(q|qq|qw|qr|qx)\<', String.Other, 'lt-string'), - (r'(q|qq|qw|qr|qx)([\W_])(.|\n)*?\2', String.Other), - (r'(package)(\s+)([a-zA-Z_]\w*(?:::[a-zA-Z_]\w*)*)', - bygroups(Keyword, Text, Name.Namespace)), - (r'(use|require|no)(\s+)([a-zA-Z_]\w*(?:::[a-zA-Z_]\w*)*)', - bygroups(Keyword, Text, Name.Namespace)), - (r'(sub)(\s+)', bygroups(Keyword, Text), 'funcname'), - (words(( - 'no', 'package', 'require', 'use'), suffix=r'\b'), - Keyword), - (r'(\[\]|\*\*|::|<<|>>|>=|<=>|<=|={3}|!=|=~|' - r'!~|&&?|\|\||\.{1,3})', Operator), - (r'[-+/*%=<>&^|!\\~]=?', Operator), - (r'[()\[\]:;,<>/?{}]', Punctuation), # yes, there's no shortage - # of punctuation in Perl! - (r'(?=\w)', Name, 'name'), - ], - 'format': [ - (r'\.\n', String.Interpol, '#pop'), - (r'[^\n]*\n', String.Interpol), - ], - 'varname': [ - (r'\s+', Text), - (r'\{', Punctuation, '#pop'), # hash syntax? 
- (r'\)|,', Punctuation, '#pop'), # argument specifier - (r'\w+::', Name.Namespace), - (r'[\w:]+', Name.Variable, '#pop'), - ], - 'name': [ - (r'[a-zA-Z_]\w*(::[a-zA-Z_]\w*)*(::)?(?=\s*->)', Name.Namespace, '#pop'), - (r'[a-zA-Z_]\w*(::[a-zA-Z_]\w*)*::', Name.Namespace, '#pop'), - (r'[\w:]+', Name, '#pop'), - (r'[A-Z_]+(?=\W)', Name.Constant, '#pop'), - (r'(?=\W)', Text, '#pop'), - ], - 'funcname': [ - (r'[a-zA-Z_]\w*[!?]?', Name.Function), - (r'\s+', Text), - # argument declaration - (r'(\([$@%]*\))(\s*)', bygroups(Punctuation, Text)), - (r';', Punctuation, '#pop'), - (r'.*?\{', Punctuation, '#pop'), - ], - 'cb-string': [ - (r'\\[{}\\]', String.Other), - (r'\\', String.Other), - (r'\{', String.Other, 'cb-string'), - (r'\}', String.Other, '#pop'), - (r'[^{}\\]+', String.Other) - ], - 'rb-string': [ - (r'\\[()\\]', String.Other), - (r'\\', String.Other), - (r'\(', String.Other, 'rb-string'), - (r'\)', String.Other, '#pop'), - (r'[^()]+', String.Other) - ], - 'sb-string': [ - (r'\\[\[\]\\]', String.Other), - (r'\\', String.Other), - (r'\[', String.Other, 'sb-string'), - (r'\]', String.Other, '#pop'), - (r'[^\[\]]+', String.Other) - ], - 'lt-string': [ - (r'\\[<>\\]', String.Other), - (r'\\', String.Other), - (r'\<', String.Other, 'lt-string'), - (r'\>', String.Other, '#pop'), - (r'[^<>]+', String.Other) - ], - 'end-part': [ - (r'.+', Comment.Preproc, '#pop') - ] - } - - def analyse_text(text): - if shebang_matches(text, r'perl'): - return True - if re.search(r'(?:my|our)\s+[$@%(]', text): - return 0.9 - - -class Perl6Lexer(ExtendedRegexLexer): - """ - For `Raku `_ (a.k.a. Perl 6) source code. - - .. versionadded:: 2.0 - """ - - name = 'Perl6' - aliases = ['perl6', 'pl6', 'raku'] - filenames = ['*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6', - '*.6pm', '*.p6m', '*.pm6', '*.t', '*.raku', '*.rakumod', - '*.rakutest', '*.rakudoc'] - mimetypes = ['text/x-perl6', 'application/x-perl6'] - flags = re.MULTILINE | re.DOTALL | re.UNICODE - - PERL6_IDENTIFIER_RANGE = r"['\w:-]" - - PERL6_KEYWORDS = ( - #Phasers - 'BEGIN','CATCH','CHECK','CLOSE','CONTROL','DOC','END','ENTER','FIRST', - 'INIT','KEEP','LAST','LEAVE','NEXT','POST','PRE','QUIT','UNDO', - #Keywords - 'anon','augment','but','class','constant','default','does','else', - 'elsif','enum','for','gather','given','grammar','has','if','import', - 'is','let','loop','made','make','method','module','multi','my','need', - 'orwith','our','proceed','proto','repeat','require','return', - 'return-rw','returns','role','rule','state','sub','submethod','subset', - 'succeed','supersede','token','try','unit','unless','until','use', - 'when','while','with','without', - #Traits - 'export','native','repr','required','rw','symbol', - ) - - PERL6_BUILTINS = ( - 'ACCEPTS','abs','abs2rel','absolute','accept','accessed','acos', - 'acosec','acosech','acosh','acotan','acotanh','acquire','act','action', - 'actions','add','add_attribute','add_enum_value','add_fallback', - 'add_method','add_parent','add_private_method','add_role','add_trustee', - 'adverb','after','all','allocate','allof','allowed','alternative-names', - 'annotations','antipair','antipairs','any','anyof','app_lifetime', - 'append','arch','archname','args','arity','Array','asec','asech','asin', - 'asinh','ASSIGN-KEY','ASSIGN-POS','assuming','ast','at','atan','atan2', - 'atanh','AT-KEY','atomic-assign','atomic-dec-fetch','atomic-fetch', - 'atomic-fetch-add','atomic-fetch-dec','atomic-fetch-inc', - 'atomic-fetch-sub','atomic-inc-fetch','AT-POS','attributes','auth', - 
'await','backtrace','Bag','BagHash','bail-out','base','basename', - 'base-repeating','batch','BIND-KEY','BIND-POS','bind-stderr', - 'bind-stdin','bind-stdout','bind-udp','bits','bless','block','Bool', - 'bool-only','bounds','break','Bridge','broken','BUILD','build-date', - 'bytes','cache','callframe','calling-package','CALL-ME','callsame', - 'callwith','can','cancel','candidates','cando','can-ok','canonpath', - 'caps','caption','Capture','cas','catdir','categorize','categorize-list', - 'catfile','catpath','cause','ceiling','cglobal','changed','Channel', - 'chars','chdir','child','child-name','child-typename','chmod','chomp', - 'chop','chr','chrs','chunks','cis','classify','classify-list','cleanup', - 'clone','close','closed','close-stdin','cmp-ok','code','codes','collate', - 'column','comb','combinations','command','comment','compiler','Complex', - 'compose','compose_type','composer','condition','config', - 'configure_destroy','configure_type_checking','conj','connect', - 'constraints','construct','contains','contents','copy','cos','cosec', - 'cosech','cosh','cotan','cotanh','count','count-only','cpu-cores', - 'cpu-usage','CREATE','create_type','cross','cue','curdir','curupdir','d', - 'Date','DateTime','day','daycount','day-of-month','day-of-week', - 'day-of-year','days-in-month','declaration','decode','decoder','deepmap', - 'default','defined','DEFINITE','delayed','DELETE-KEY','DELETE-POS', - 'denominator','desc','DESTROY','destroyers','devnull','diag', - 'did-you-mean','die','dies-ok','dir','dirname','dir-sep','DISTROnames', - 'do','does','does-ok','done','done-testing','duckmap','dynamic','e', - 'eager','earlier','elems','emit','enclosing','encode','encoder', - 'encoding','end','ends-with','enum_from_value','enum_value_list', - 'enum_values','enums','eof','EVAL','eval-dies-ok','EVALFILE', - 'eval-lives-ok','exception','excludes-max','excludes-min','EXISTS-KEY', - 'EXISTS-POS','exit','exitcode','exp','expected','explicitly-manage', - 'expmod','extension','f','fail','fails-like','fc','feature','file', - 'filename','find_method','find_method_qualified','finish','first','flat', - 'flatmap','flip','floor','flunk','flush','fmt','format','formatter', - 'freeze','from','from-list','from-loop','from-posix','full', - 'full-barrier','get','get_value','getc','gist','got','grab','grabpairs', - 'grep','handle','handled','handles','hardware','has_accessor','Hash', - 'head','headers','hh-mm-ss','hidden','hides','hour','how','hyper','id', - 'illegal','im','in','indent','index','indices','indir','infinite', - 'infix','infix:<+>','infix:<->','install_method_cache','Instant', - 'instead','Int','int-bounds','interval','in-timezone','invalid-str', - 'invert','invocant','IO','IO::Notification.watch-path','is_trusted', - 'is_type','isa','is-absolute','isa-ok','is-approx','is-deeply', - 'is-hidden','is-initial-thread','is-int','is-lazy','is-leap-year', - 'isNaN','isnt','is-prime','is-relative','is-routine','is-setting', - 'is-win','item','iterator','join','keep','kept','KERNELnames','key', - 'keyof','keys','kill','kv','kxxv','l','lang','last','lastcall','later', - 'lazy','lc','leading','level','like','line','lines','link','List', - 'listen','live','lives-ok','local','lock','log','log10','lookup','lsb', - 'made','MAIN','make','Map','match','max','maxpairs','merge','message', - 'method','method_table','methods','migrate','min','minmax','minpairs', - 'minute','misplaced','Mix','MixHash','mkdir','mode','modified','month', - 'move','mro','msb','multi','multiness','my','name','named','named_names', - 
'narrow','nativecast','native-descriptor','nativesizeof','new','new_type', - 'new-from-daycount','new-from-pairs','next','nextcallee','next-handle', - 'nextsame','nextwith','NFC','NFD','NFKC','NFKD','nl-in','nl-out', - 'nodemap','nok','none','norm','not','note','now','nude','Num', - 'numerator','Numeric','of','offset','offset-in-hours','offset-in-minutes', - 'ok','old','on-close','one','on-switch','open','opened','operation', - 'optional','ord','ords','orig','os-error','osname','out-buffer','pack', - 'package','package-kind','package-name','packages','pair','pairs', - 'pairup','parameter','params','parent','parent-name','parents','parse', - 'parse-base','parsefile','parse-names','parts','pass','path','path-sep', - 'payload','peer-host','peer-port','periods','perl','permutations','phaser', - 'pick','pickpairs','pid','placeholder','plan','plus','polar','poll', - 'polymod','pop','pos','positional','posix','postfix','postmatch', - 'precomp-ext','precomp-target','pred','prefix','prematch','prepend', - 'print','printf','print-nl','print-to','private','private_method_table', - 'proc','produce','Promise','prompt','protect','pull-one','push', - 'push-all','push-at-least','push-exactly','push-until-lazy','put', - 'qualifier-type','quit','r','race','radix','rand','range','Rat','raw', - 're','read','readchars','readonly','ready','Real','reallocate','reals', - 'reason','rebless','receive','recv','redispatcher','redo','reduce', - 'rel2abs','relative','release','rename','repeated','replacement', - 'report','reserved','resolve','restore','result','resume','rethrow', - 'reverse','right','rindex','rmdir','role','roles_to_compose','rolish', - 'roll','rootdir','roots','rotate','rotor','round','roundrobin', - 'routine-type','run','rwx','s','samecase','samemark','samewith','say', - 'schedule-on','scheduler','scope','sec','sech','second','seek','self', - 'send','Set','set_hidden','set_name','set_package','set_rw','set_value', - 'SetHash','set-instruments','setup_finalization','shape','share','shell', - 'shift','sibling','sigil','sign','signal','signals','signature','sin', - 'sinh','sink','sink-all','skip','skip-at-least','skip-at-least-pull-one', - 'skip-one','skip-rest','sleep','sleep-timer','sleep-until','Slip','slurp', - 'slurp-rest','slurpy','snap','snapper','so','socket-host','socket-port', - 'sort','source','source-package','spawn','SPEC','splice','split', - 'splitdir','splitpath','sprintf','spurt','sqrt','squish','srand','stable', - 'start','started','starts-with','status','stderr','stdout','Str', - 'sub_signature','subbuf','subbuf-rw','subname','subparse','subst', - 'subst-mutate','substr','substr-eq','substr-rw','subtest','succ','sum', - 'Supply','symlink','t','tail','take','take-rw','tan','tanh','tap', - 'target','target-name','tc','tclc','tell','then','throttle','throw', - 'throws-like','timezone','tmpdir','to','today','todo','toggle','to-posix', - 'total','trailing','trans','tree','trim','trim-leading','trim-trailing', - 'truncate','truncated-to','trusts','try_acquire','trying','twigil','type', - 'type_captures','typename','uc','udp','uncaught_handler','unimatch', - 'uniname','uninames','uniparse','uniprop','uniprops','unique','unival', - 'univals','unlike','unlink','unlock','unpack','unpolar','unshift', - 'unwrap','updir','USAGE','use-ok','utc','val','value','values','VAR', - 'variable','verbose-config','version','VMnames','volume','vow','w','wait', - 'warn','watch','watch-path','week','weekday-of-month','week-number', - 'week-year','WHAT','when','WHERE','WHEREFORE','WHICH','WHO', - 
'whole-second','WHY','wordcase','words','workaround','wrap','write', - 'write-to','x','yada','year','yield','yyyy-mm-dd','z','zip','zip-latest', - - ) - - PERL6_BUILTIN_CLASSES = ( - #Booleans - 'False','True', - #Classes - 'Any','Array','Associative','AST','atomicint','Attribute','Backtrace', - 'Backtrace::Frame','Bag','Baggy','BagHash','Blob','Block','Bool','Buf', - 'Callable','CallFrame','Cancellation','Capture','CArray','Channel','Code', - 'compiler','Complex','ComplexStr','Cool','CurrentThreadScheduler', - 'Cursor','Date','Dateish','DateTime','Distro','Duration','Encoding', - 'Exception','Failure','FatRat','Grammar','Hash','HyperWhatever','Instant', - 'Int','int16','int32','int64','int8','IntStr','IO','IO::ArgFiles', - 'IO::CatHandle','IO::Handle','IO::Notification','IO::Path', - 'IO::Path::Cygwin','IO::Path::QNX','IO::Path::Unix','IO::Path::Win32', - 'IO::Pipe','IO::Socket','IO::Socket::Async','IO::Socket::INET','IO::Spec', - 'IO::Spec::Cygwin','IO::Spec::QNX','IO::Spec::Unix','IO::Spec::Win32', - 'IO::Special','Iterable','Iterator','Junction','Kernel','Label','List', - 'Lock','Lock::Async','long','longlong','Macro','Map','Match', - 'Metamodel::AttributeContainer','Metamodel::C3MRO','Metamodel::ClassHOW', - 'Metamodel::EnumHOW','Metamodel::Finalization','Metamodel::MethodContainer', - 'Metamodel::MROBasedMethodDispatch','Metamodel::MultipleInheritance', - 'Metamodel::Naming','Metamodel::Primitives','Metamodel::PrivateMethodContainer', - 'Metamodel::RoleContainer','Metamodel::Trusting','Method','Mix','MixHash', - 'Mixy','Mu','NFC','NFD','NFKC','NFKD','Nil','Num','num32','num64', - 'Numeric','NumStr','ObjAt','Order','Pair','Parameter','Perl','Pod::Block', - 'Pod::Block::Code','Pod::Block::Comment','Pod::Block::Declarator', - 'Pod::Block::Named','Pod::Block::Para','Pod::Block::Table','Pod::Heading', - 'Pod::Item','Pointer','Positional','PositionalBindFailover','Proc', - 'Proc::Async','Promise','Proxy','PseudoStash','QuantHash','Range','Rat', - 'Rational','RatStr','Real','Regex','Routine','Scalar','Scheduler', - 'Semaphore','Seq','Set','SetHash','Setty','Signature','size_t','Slip', - 'Stash','Str','StrDistance','Stringy','Sub','Submethod','Supplier', - 'Supplier::Preserving','Supply','Systemic','Tap','Telemetry', - 'Telemetry::Instrument::Thread','Telemetry::Instrument::Usage', - 'Telemetry::Period','Telemetry::Sampler','Thread','ThreadPoolScheduler', - 'UInt','uint16','uint32','uint64','uint8','Uni','utf8','Variable', - 'Version','VM','Whatever','WhateverCode','WrapHandle' - ) - - PERL6_OPERATORS = ( - 'X', 'Z', 'after', 'also', 'and', 'andthen', 'before', 'cmp', 'div', - 'eq', 'eqv', 'extra', 'ff', 'fff', 'ge', 'gt', 'le', 'leg', 'lt', 'm', - 'mm', 'mod', 'ne', 'or', 'orelse', 'rx', 's', 'tr', 'x', 'xor', 'xx', - '++', '--', '**', '!', '+', '-', '~', '?', '|', '||', '+^', '~^', '?^', - '^', '*', '/', '%', '%%', '+&', '+<', '+>', '~&', '~<', '~>', '?&', - 'gcd', 'lcm', '+', '-', '+|', '+^', '~|', '~^', '?|', '?^', - '~', '&', '^', 'but', 'does', '<=>', '..', '..^', '^..', '^..^', - '!=', '==', '<', '<=', '>', '>=', '~~', '===', '!eqv', - '&&', '||', '^^', '//', 'min', 'max', '??', '!!', 'ff', 'fff', 'so', - 'not', '<==', '==>', '<<==', '==>>','unicmp', - ) - - # Perl 6 has a *lot* of possible bracketing characters - # this list was lifted from STD.pm6 (https://github.com/perl6/std) - PERL6_BRACKETS = { - u'\u0028': u'\u0029', u'\u003c': u'\u003e', u'\u005b': u'\u005d', - u'\u007b': u'\u007d', u'\u00ab': u'\u00bb', u'\u0f3a': u'\u0f3b', - u'\u0f3c': u'\u0f3d', u'\u169b': u'\u169c', 
u'\u2018': u'\u2019', - u'\u201a': u'\u2019', u'\u201b': u'\u2019', u'\u201c': u'\u201d', - u'\u201e': u'\u201d', u'\u201f': u'\u201d', u'\u2039': u'\u203a', - u'\u2045': u'\u2046', u'\u207d': u'\u207e', u'\u208d': u'\u208e', - u'\u2208': u'\u220b', u'\u2209': u'\u220c', u'\u220a': u'\u220d', - u'\u2215': u'\u29f5', u'\u223c': u'\u223d', u'\u2243': u'\u22cd', - u'\u2252': u'\u2253', u'\u2254': u'\u2255', u'\u2264': u'\u2265', - u'\u2266': u'\u2267', u'\u2268': u'\u2269', u'\u226a': u'\u226b', - u'\u226e': u'\u226f', u'\u2270': u'\u2271', u'\u2272': u'\u2273', - u'\u2274': u'\u2275', u'\u2276': u'\u2277', u'\u2278': u'\u2279', - u'\u227a': u'\u227b', u'\u227c': u'\u227d', u'\u227e': u'\u227f', - u'\u2280': u'\u2281', u'\u2282': u'\u2283', u'\u2284': u'\u2285', - u'\u2286': u'\u2287', u'\u2288': u'\u2289', u'\u228a': u'\u228b', - u'\u228f': u'\u2290', u'\u2291': u'\u2292', u'\u2298': u'\u29b8', - u'\u22a2': u'\u22a3', u'\u22a6': u'\u2ade', u'\u22a8': u'\u2ae4', - u'\u22a9': u'\u2ae3', u'\u22ab': u'\u2ae5', u'\u22b0': u'\u22b1', - u'\u22b2': u'\u22b3', u'\u22b4': u'\u22b5', u'\u22b6': u'\u22b7', - u'\u22c9': u'\u22ca', u'\u22cb': u'\u22cc', u'\u22d0': u'\u22d1', - u'\u22d6': u'\u22d7', u'\u22d8': u'\u22d9', u'\u22da': u'\u22db', - u'\u22dc': u'\u22dd', u'\u22de': u'\u22df', u'\u22e0': u'\u22e1', - u'\u22e2': u'\u22e3', u'\u22e4': u'\u22e5', u'\u22e6': u'\u22e7', - u'\u22e8': u'\u22e9', u'\u22ea': u'\u22eb', u'\u22ec': u'\u22ed', - u'\u22f0': u'\u22f1', u'\u22f2': u'\u22fa', u'\u22f3': u'\u22fb', - u'\u22f4': u'\u22fc', u'\u22f6': u'\u22fd', u'\u22f7': u'\u22fe', - u'\u2308': u'\u2309', u'\u230a': u'\u230b', u'\u2329': u'\u232a', - u'\u23b4': u'\u23b5', u'\u2768': u'\u2769', u'\u276a': u'\u276b', - u'\u276c': u'\u276d', u'\u276e': u'\u276f', u'\u2770': u'\u2771', - u'\u2772': u'\u2773', u'\u2774': u'\u2775', u'\u27c3': u'\u27c4', - u'\u27c5': u'\u27c6', u'\u27d5': u'\u27d6', u'\u27dd': u'\u27de', - u'\u27e2': u'\u27e3', u'\u27e4': u'\u27e5', u'\u27e6': u'\u27e7', - u'\u27e8': u'\u27e9', u'\u27ea': u'\u27eb', u'\u2983': u'\u2984', - u'\u2985': u'\u2986', u'\u2987': u'\u2988', u'\u2989': u'\u298a', - u'\u298b': u'\u298c', u'\u298d': u'\u298e', u'\u298f': u'\u2990', - u'\u2991': u'\u2992', u'\u2993': u'\u2994', u'\u2995': u'\u2996', - u'\u2997': u'\u2998', u'\u29c0': u'\u29c1', u'\u29c4': u'\u29c5', - u'\u29cf': u'\u29d0', u'\u29d1': u'\u29d2', u'\u29d4': u'\u29d5', - u'\u29d8': u'\u29d9', u'\u29da': u'\u29db', u'\u29f8': u'\u29f9', - u'\u29fc': u'\u29fd', u'\u2a2b': u'\u2a2c', u'\u2a2d': u'\u2a2e', - u'\u2a34': u'\u2a35', u'\u2a3c': u'\u2a3d', u'\u2a64': u'\u2a65', - u'\u2a79': u'\u2a7a', u'\u2a7d': u'\u2a7e', u'\u2a7f': u'\u2a80', - u'\u2a81': u'\u2a82', u'\u2a83': u'\u2a84', u'\u2a8b': u'\u2a8c', - u'\u2a91': u'\u2a92', u'\u2a93': u'\u2a94', u'\u2a95': u'\u2a96', - u'\u2a97': u'\u2a98', u'\u2a99': u'\u2a9a', u'\u2a9b': u'\u2a9c', - u'\u2aa1': u'\u2aa2', u'\u2aa6': u'\u2aa7', u'\u2aa8': u'\u2aa9', - u'\u2aaa': u'\u2aab', u'\u2aac': u'\u2aad', u'\u2aaf': u'\u2ab0', - u'\u2ab3': u'\u2ab4', u'\u2abb': u'\u2abc', u'\u2abd': u'\u2abe', - u'\u2abf': u'\u2ac0', u'\u2ac1': u'\u2ac2', u'\u2ac3': u'\u2ac4', - u'\u2ac5': u'\u2ac6', u'\u2acd': u'\u2ace', u'\u2acf': u'\u2ad0', - u'\u2ad1': u'\u2ad2', u'\u2ad3': u'\u2ad4', u'\u2ad5': u'\u2ad6', - u'\u2aec': u'\u2aed', u'\u2af7': u'\u2af8', u'\u2af9': u'\u2afa', - u'\u2e02': u'\u2e03', u'\u2e04': u'\u2e05', u'\u2e09': u'\u2e0a', - u'\u2e0c': u'\u2e0d', u'\u2e1c': u'\u2e1d', u'\u2e20': u'\u2e21', - u'\u3008': u'\u3009', u'\u300a': u'\u300b', u'\u300c': 
u'\u300d', - u'\u300e': u'\u300f', u'\u3010': u'\u3011', u'\u3014': u'\u3015', - u'\u3016': u'\u3017', u'\u3018': u'\u3019', u'\u301a': u'\u301b', - u'\u301d': u'\u301e', u'\ufd3e': u'\ufd3f', u'\ufe17': u'\ufe18', - u'\ufe35': u'\ufe36', u'\ufe37': u'\ufe38', u'\ufe39': u'\ufe3a', - u'\ufe3b': u'\ufe3c', u'\ufe3d': u'\ufe3e', u'\ufe3f': u'\ufe40', - u'\ufe41': u'\ufe42', u'\ufe43': u'\ufe44', u'\ufe47': u'\ufe48', - u'\ufe59': u'\ufe5a', u'\ufe5b': u'\ufe5c', u'\ufe5d': u'\ufe5e', - u'\uff08': u'\uff09', u'\uff1c': u'\uff1e', u'\uff3b': u'\uff3d', - u'\uff5b': u'\uff5d', u'\uff5f': u'\uff60', u'\uff62': u'\uff63', - } - - def _build_word_match(words, boundary_regex_fragment=None, prefix='', suffix=''): - if boundary_regex_fragment is None: - return r'\b(' + prefix + r'|'.join(re.escape(x) for x in words) + \ - suffix + r')\b' - else: - return r'(?<!' + boundary_regex_fragment + r')' + prefix + r'(' + \ - r'|'.join(re.escape(x) for x in words) + r')' + suffix + r'(?!' + \ - boundary_regex_fragment + r')' - - def brackets_callback(token_class): - def callback(lexer, match, context): - groups = match.groupdict() - opening_chars = groups['delimiter'] - n_chars = len(opening_chars) - adverbs = groups.get('adverbs') - - closer = Perl6Lexer.PERL6_BRACKETS.get(opening_chars[0]) - text = context.text - - if closer is None: # it's not a mirrored character, which means we - # just need to look for the next occurrence - - end_pos = text.find(opening_chars, match.start('delimiter') + n_chars) - else: # we need to look for the corresponding closing character, - # keep nesting in mind - closing_chars = closer * n_chars - nesting_level = 1 - - search_pos = match.start('delimiter') - - while nesting_level > 0: - next_open_pos = text.find(opening_chars, search_pos + n_chars) - next_close_pos = text.find(closing_chars, search_pos + n_chars) - - if next_close_pos == -1: - next_close_pos = len(text) - nesting_level = 0 - elif next_open_pos != -1 and next_open_pos < next_close_pos: - nesting_level += 1 - search_pos = next_open_pos - else: # next_close_pos < next_open_pos - nesting_level -= 1 - search_pos = next_close_pos - - end_pos = next_close_pos - - if end_pos < 0: # if we didn't find a closer, just highlight the - # rest of the text in this class - end_pos = len(text) - - if adverbs is not None and re.search(r':to\b', adverbs): - heredoc_terminator = text[match.start('delimiter') + n_chars:end_pos] - end_heredoc = re.search(r'^\s*' + re.escape(heredoc_terminator) + - r'\s*$', text[end_pos:], re.MULTILINE) - - if end_heredoc: - end_pos += end_heredoc.end() - else: - end_pos = len(text) - - yield match.start(), token_class, text[match.start():end_pos + n_chars] - context.pos = end_pos + n_chars - - return callback - - def opening_brace_callback(lexer, match, context): - stack = context.stack - - yield match.start(), Text, context.text[match.start():match.end()] - context.pos = match.end() - - # if we encounter an opening brace and we're one level - # below a token state, it means we need to increment - # the nesting level for braces so we know later when - # we should return to the token rules. - if len(stack) > 2 and stack[-2] == 'token': - context.perl6_token_nesting_level += 1 - - def closing_brace_callback(lexer, match, context): - stack = context.stack - - yield match.start(), Text, context.text[match.start():match.end()] - context.pos = match.end() - - # if we encounter a free closing brace and we're one level - # below a token state, it means we need to check the nesting - # level to see if we need to return to the token state. - if len(stack) > 2 and stack[-2] == 'token': - context.perl6_token_nesting_level -= 1 - if context.perl6_token_nesting_level == 0: - stack.pop() - - def embedded_perl6_callback(lexer, match, context): - context.perl6_token_nesting_level = 1 - yield match.start(), Text, context.text[match.start():match.end()] - context.pos = match.end() - context.stack.append('root') - - # If you're modifying these rules, be careful if you need to process '{' or '}' - # characters. We have special logic for processing these characters (due to the fact - # that you can nest Perl 6 code in regex blocks), so if you need to process one of - # them, make sure you also process the corresponding one!
- tokens = { - 'common': [ - (r'#[`|=](?P<delimiter>(?P<first_char>[' + ''.join(PERL6_BRACKETS) + r'])(?P=first_char)*)', - brackets_callback(Comment.Multiline)), - (r'#[^\n]*$', Comment.Single), - (r'^(\s*)=begin\s+(\w+)\b.*?^\1=end\s+\2', Comment.Multiline), - (r'^(\s*)=for.*?\n\s*?\n', Comment.Multiline), - (r'^=.*?\n\s*?\n', Comment.Multiline), - (r'(regex|token|rule)(\s*' + PERL6_IDENTIFIER_RANGE + '+:sym)', - bygroups(Keyword, Name), 'token-sym-brackets'), - (r'(regex|token|rule)(?!' + PERL6_IDENTIFIER_RANGE + r')(\s*' + PERL6_IDENTIFIER_RANGE + '+)?', - bygroups(Keyword, Name), 'pre-token'), - # deal with a special case in the Perl 6 grammar (role q { ... }) - (r'(role)(\s+)(q)(\s*)', bygroups(Keyword, Text, Name, Text)), - (_build_word_match(PERL6_KEYWORDS, PERL6_IDENTIFIER_RANGE), Keyword), - (_build_word_match(PERL6_BUILTIN_CLASSES, PERL6_IDENTIFIER_RANGE, suffix='(?::[UD])?'), - Name.Builtin), - (_build_word_match(PERL6_BUILTINS, PERL6_IDENTIFIER_RANGE), Name.Builtin), - # copied from PerlLexer - (r'[$@%&][.^:?=!~]?' + PERL6_IDENTIFIER_RANGE + u'+(?:<<.*?>>|<.*?>|«.*?»)*', - Name.Variable), - (r'\$[!/](?:<<.*?>>|<.*?>|«.*?»)*', Name.Variable.Global), - (r'::\?\w+', Name.Variable.Global), - (r'[$@%&]\*' + PERL6_IDENTIFIER_RANGE + u'+(?:<<.*?>>|<.*?>|«.*?»)*', - Name.Variable.Global), - (r'\$(?:<.*?>)+', Name.Variable), - (r'(?:q|qq|Q)[a-zA-Z]?\s*(?P<adverbs>:[\w\s:]+)?\s*(?P<delimiter>(?P<first_char>[^0-9a-zA-Z:\s])' - r'(?P=first_char)*)', brackets_callback(String)), - # copied from PerlLexer - (r'0_?[0-7]+(_[0-7]+)*', Number.Oct), - (r'0x[0-9A-Fa-f]+(_[0-9A-Fa-f]+)*', Number.Hex), - (r'0b[01]+(_[01]+)*', Number.Bin), - (r'(?i)(\d*(_\d*)*\.\d+(_\d*)*|\d+(_\d*)*\.\d+(_\d*)*)(e[+-]?\d+)?', - Number.Float), - (r'(?i)\d+(_\d*)*e[+-]?\d+(_\d*)*', Number.Float), - (r'\d+(_\d+)*', Number.Integer), - (r'(?<=~~)\s*/(?:\\\\|\\/|.)*?/', String.Regex), - (r'(?<=[=(,])\s*/(?:\\\\|\\/|.)*?/', String.Regex), - (r'm\w+(?=\()', Name), - (r'(?:m|ms|rx)\s*(?P<adverbs>:[\w\s:]+)?\s*(?P<delimiter>(?P<first_char>[^\w:\s])' - r'(?P=first_char)*)', brackets_callback(String.Regex)), - (r'(?:s|ss|tr)\s*(?::[\w\s:]+)?\s*/(?:\\\\|\\/|.)*?/(?:\\\\|\\/|.)*?/', - String.Regex), - (r'<[^\s=].*?\S>', String), - (_build_word_match(PERL6_OPERATORS), Operator), - (r'\w' + PERL6_IDENTIFIER_RANGE + '*', Name), - (r"'(\\\\|\\[^\\]|[^'\\])*'", String), - (r'"(\\\\|\\[^\\]|[^"\\])*"', String), - ], - 'root': [ - include('common'), - (r'\{', opening_brace_callback), - (r'\}', closing_brace_callback), - (r'.+?', Text), - ], - 'pre-token': [ - include('common'), - (r'\{', Text, ('#pop', 'token')), - (r'.+?', Text), - ], - 'token-sym-brackets': [ - (r'(?P<delimiter>(?P<first_char>[' + ''.join(PERL6_BRACKETS) + '])(?P=first_char)*)', - brackets_callback(Name), ('#pop', 'pre-token')), - default(('#pop', 'pre-token')), - ], - 'token': [ - (r'\}', Text, '#pop'), - (r'(?<=:)(?:my|our|state|constant|temp|let).*?;', using(this)), - # make sure that quotes in character classes aren't treated as strings - (r'<(?:[-!?+.]\s*)?\[.*?\]>', String.Regex), - # make sure that '#' characters in quotes aren't treated as comments - (r"(?<!\\)'(\\\\|\\[^\\]|[^'\\])*'", String.Regex), - (r'(?<!\\)"(\\\\|\\[^\\]|[^"\\])*"', String.Regex), - (r'#.*?$', Comment.Single), - (r'\{', embedded_perl6_callback), - ('.+?', String.Regex), - ], - } - - def analyse_text(text): - def strip_pod(lines): - in_pod = False - stripped_lines = [] - - for line in lines: - if re.match(r'^=(?:end|cut)', line): - in_pod = False - elif re.match(r'^=\w+', line): - in_pod = True - elif not in_pod: - stripped_lines.append(line) - - return stripped_lines - - # XXX handle block comments - lines = text.splitlines() - lines = strip_pod(lines) - text = '\n'.join(lines) - - if shebang_matches(text, r'perl6|rakudo|niecza|pugs'): - return True - - saw_perl_decl = False - rating = False - - # check for my/our/has declarations - if re.search(r"(?:my|our|has)\s+(?:" + Perl6Lexer.PERL6_IDENTIFIER_RANGE + - r"+\s+)?[$@%&(]", text): - rating = 0.8 - saw_perl_decl = True - - for line in lines[:25]: - line = line.strip() - if not line: - continue - - # match v6; use v6; use v6.0; use v6.0.0; - if re.match(r'^\s*(?:use\s+)?v6(?:\.\d(?:\.\d)?)?;', line): - return True - # match class, module, role, enum, grammar declarations - class_decl = re.match(r'^\s*(?:(?P<scope>my|our)\s+)?(?:module|class|role|enum|grammar)', line) - if class_decl: - if saw_perl_decl or class_decl.group('scope') is not None: - return True - rating = 0.05 - continue - break - - return rating - - def __init__(self, **options): - super(Perl6Lexer, self).__init__(**options) - self.encoding = options.get('encoding', 'utf-8') +# -*- coding: utf-8 -*- +""" + pygments.lexers.perl + ~~~~~~~~~~~~~~~~~~~~ + + Lexers for Perl, Raku and related languages.
+ + :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, ExtendedRegexLexer, include, bygroups, \ + using, this, default, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation +from pygments.util import shebang_matches + +__all__ = ['PerlLexer', 'Perl6Lexer'] + + +class PerlLexer(RegexLexer): + """ + For `Perl `_ source code. + """ + + name = 'Perl' + aliases = ['perl', 'pl'] + filenames = ['*.pl', '*.pm', '*.t', '*.perl'] + mimetypes = ['text/x-perl', 'application/x-perl'] + + flags = re.DOTALL | re.MULTILINE + # TODO: give this to a perl guy who knows how to parse perl... + tokens = { + 'balanced-regex': [ + (r'/(\\\\|\\[^\\]|[^\\/])*/[egimosx]*', String.Regex, '#pop'), + (r'!(\\\\|\\[^\\]|[^\\!])*![egimosx]*', String.Regex, '#pop'), + (r'\\(\\\\|[^\\])*\\[egimosx]*', String.Regex, '#pop'), + (r'\{(\\\\|\\[^\\]|[^\\}])*\}[egimosx]*', String.Regex, '#pop'), + (r'<(\\\\|\\[^\\]|[^\\>])*>[egimosx]*', String.Regex, '#pop'), + (r'\[(\\\\|\\[^\\]|[^\\\]])*\][egimosx]*', String.Regex, '#pop'), + (r'\((\\\\|\\[^\\]|[^\\)])*\)[egimosx]*', String.Regex, '#pop'), + (r'@(\\\\|\\[^\\]|[^\\@])*@[egimosx]*', String.Regex, '#pop'), + (r'%(\\\\|\\[^\\]|[^\\%])*%[egimosx]*', String.Regex, '#pop'), + (r'\$(\\\\|\\[^\\]|[^\\$])*\$[egimosx]*', String.Regex, '#pop'), + ], + 'root': [ + (r'\A\#!.+?$', Comment.Hashbang), + (r'\#.*?$', Comment.Single), + (r'^=[a-zA-Z0-9]+\s+.*?\n=cut', Comment.Multiline), + (words(( + 'case', 'continue', 'do', 'else', 'elsif', 'for', 'foreach', + 'if', 'last', 'my', 'next', 'our', 'redo', 'reset', 'then', + 'unless', 'until', 'while', 'print', 'new', 'BEGIN', + 'CHECK', 'INIT', 'END', 'return'), suffix=r'\b'), + Keyword), + (r'(format)(\s+)(\w+)(\s*)(=)(\s*\n)', + bygroups(Keyword, Text, Name, Text, Punctuation, Text), 'format'), + (r'(eq|lt|gt|le|ge|ne|not|and|or|cmp)\b', Operator.Word), + # common delimiters + (r's/(\\\\|\\[^\\]|[^\\/])*/(\\\\|\\[^\\]|[^\\/])*/[egimosx]*', + String.Regex), + (r's!(\\\\|\\!|[^!])*!(\\\\|\\!|[^!])*![egimosx]*', String.Regex), + (r's\\(\\\\|[^\\])*\\(\\\\|[^\\])*\\[egimosx]*', String.Regex), + (r's@(\\\\|\\[^\\]|[^\\@])*@(\\\\|\\[^\\]|[^\\@])*@[egimosx]*', + String.Regex), + (r's%(\\\\|\\[^\\]|[^\\%])*%(\\\\|\\[^\\]|[^\\%])*%[egimosx]*', + String.Regex), + # balanced delimiters + (r's\{(\\\\|\\[^\\]|[^\\}])*\}\s*', String.Regex, 'balanced-regex'), + (r's<(\\\\|\\[^\\]|[^\\>])*>\s*', String.Regex, 'balanced-regex'), + (r's\[(\\\\|\\[^\\]|[^\\\]])*\]\s*', String.Regex, + 'balanced-regex'), + (r's\((\\\\|\\[^\\]|[^\\)])*\)\s*', String.Regex, + 'balanced-regex'), + + (r'm?/(\\\\|\\[^\\]|[^\\/\n])*/[gcimosx]*', String.Regex), + (r'm(?=[/!\\{<\[(@%$])', String.Regex, 'balanced-regex'), + (r'((?<==~)|(?<=\())\s*/(\\\\|\\[^\\]|[^\\/])*/[gcimosx]*', + String.Regex), + (r'\s+', Text), + (words(( + 'abs', 'accept', 'alarm', 'atan2', 'bind', 'binmode', 'bless', 'caller', 'chdir', + 'chmod', 'chomp', 'chop', 'chown', 'chr', 'chroot', 'close', 'closedir', 'connect', + 'continue', 'cos', 'crypt', 'dbmclose', 'dbmopen', 'defined', 'delete', 'die', + 'dump', 'each', 'endgrent', 'endhostent', 'endnetent', 'endprotoent', + 'endpwent', 'endservent', 'eof', 'eval', 'exec', 'exists', 'exit', 'exp', 'fcntl', + 'fileno', 'flock', 'fork', 'format', 'formline', 'getc', 'getgrent', 'getgrgid', + 'getgrnam', 'gethostbyaddr', 'gethostbyname', 'gethostent', 'getlogin', + 'getnetbyaddr', 'getnetbyname', 
'getnetent', 'getpeername', 'getpgrp', + 'getppid', 'getpriority', 'getprotobyname', 'getprotobynumber', + 'getprotoent', 'getpwent', 'getpwnam', 'getpwuid', 'getservbyname', + 'getservbyport', 'getservent', 'getsockname', 'getsockopt', 'glob', 'gmtime', + 'goto', 'grep', 'hex', 'import', 'index', 'int', 'ioctl', 'join', 'keys', 'kill', 'last', + 'lc', 'lcfirst', 'length', 'link', 'listen', 'local', 'localtime', 'log', 'lstat', + 'map', 'mkdir', 'msgctl', 'msgget', 'msgrcv', 'msgsnd', 'my', 'next', 'oct', 'open', + 'opendir', 'ord', 'our', 'pack', 'pipe', 'pop', 'pos', 'printf', + 'prototype', 'push', 'quotemeta', 'rand', 'read', 'readdir', + 'readline', 'readlink', 'readpipe', 'recv', 'redo', 'ref', 'rename', + 'reverse', 'rewinddir', 'rindex', 'rmdir', 'scalar', 'seek', 'seekdir', + 'select', 'semctl', 'semget', 'semop', 'send', 'setgrent', 'sethostent', 'setnetent', + 'setpgrp', 'setpriority', 'setprotoent', 'setpwent', 'setservent', + 'setsockopt', 'shift', 'shmctl', 'shmget', 'shmread', 'shmwrite', 'shutdown', + 'sin', 'sleep', 'socket', 'socketpair', 'sort', 'splice', 'split', 'sprintf', 'sqrt', + 'srand', 'stat', 'study', 'substr', 'symlink', 'syscall', 'sysopen', 'sysread', + 'sysseek', 'system', 'syswrite', 'tell', 'telldir', 'tie', 'tied', 'time', 'times', 'tr', + 'truncate', 'uc', 'ucfirst', 'umask', 'undef', 'unlink', 'unpack', 'unshift', 'untie', + 'utime', 'values', 'vec', 'wait', 'waitpid', 'wantarray', 'warn', 'write'), suffix=r'\b'), + Name.Builtin), + (r'((__(DATA|DIE|WARN)__)|(STD(IN|OUT|ERR)))\b', Name.Builtin.Pseudo), + (r'(<<)([\'"]?)([a-zA-Z_]\w*)(\2;?\n.*?\n)(\3)(\n)', + bygroups(String, String, String.Delimiter, String, String.Delimiter, Text)), + (r'__END__', Comment.Preproc, 'end-part'), + (r'\$\^[ADEFHILMOPSTWX]', Name.Variable.Global), + (r"\$[\\\"\[\]'&`+*.,;=%~?@$!<>(^|/-](?!\w)", Name.Variable.Global), + (r'[$@%#]+', Name.Variable, 'varname'), + (r'0_?[0-7]+(_[0-7]+)*', Number.Oct), + (r'0x[0-9A-Fa-f]+(_[0-9A-Fa-f]+)*', Number.Hex), + (r'0b[01]+(_[01]+)*', Number.Bin), + (r'(?i)(\d*(_\d*)*\.\d+(_\d*)*|\d+(_\d*)*\.\d+(_\d*)*)(e[+-]?\d+)?', + Number.Float), + (r'(?i)\d+(_\d*)*e[+-]?\d+(_\d*)*', Number.Float), + (r'\d+(_\d+)*', Number.Integer), + (r"'(\\\\|\\[^\\]|[^'\\])*'", String), + (r'"(\\\\|\\[^\\]|[^"\\])*"', String), + (r'`(\\\\|\\[^\\]|[^`\\])*`', String.Backtick), + (r'<([^\s>]+)>', String.Regex), + (r'(q|qq|qw|qr|qx)\{', String.Other, 'cb-string'), + (r'(q|qq|qw|qr|qx)\(', String.Other, 'rb-string'), + (r'(q|qq|qw|qr|qx)\[', String.Other, 'sb-string'), + (r'(q|qq|qw|qr|qx)\<', String.Other, 'lt-string'), + (r'(q|qq|qw|qr|qx)([\W_])(.|\n)*?\2', String.Other), + (r'(package)(\s+)([a-zA-Z_]\w*(?:::[a-zA-Z_]\w*)*)', + bygroups(Keyword, Text, Name.Namespace)), + (r'(use|require|no)(\s+)([a-zA-Z_]\w*(?:::[a-zA-Z_]\w*)*)', + bygroups(Keyword, Text, Name.Namespace)), + (r'(sub)(\s+)', bygroups(Keyword, Text), 'funcname'), + (words(( + 'no', 'package', 'require', 'use'), suffix=r'\b'), + Keyword), + (r'(\[\]|\*\*|::|<<|>>|>=|<=>|<=|={3}|!=|=~|' + r'!~|&&?|\|\||\.{1,3})', Operator), + (r'[-+/*%=<>&^|!\\~]=?', Operator), + (r'[()\[\]:;,<>/?{}]', Punctuation), # yes, there's no shortage + # of punctuation in Perl! + (r'(?=\w)', Name, 'name'), + ], + 'format': [ + (r'\.\n', String.Interpol, '#pop'), + (r'[^\n]*\n', String.Interpol), + ], + 'varname': [ + (r'\s+', Text), + (r'\{', Punctuation, '#pop'), # hash syntax? 
+ (r'\)|,', Punctuation, '#pop'), # argument specifier + (r'\w+::', Name.Namespace), + (r'[\w:]+', Name.Variable, '#pop'), + ], + 'name': [ + (r'[a-zA-Z_]\w*(::[a-zA-Z_]\w*)*(::)?(?=\s*->)', Name.Namespace, '#pop'), + (r'[a-zA-Z_]\w*(::[a-zA-Z_]\w*)*::', Name.Namespace, '#pop'), + (r'[\w:]+', Name, '#pop'), + (r'[A-Z_]+(?=\W)', Name.Constant, '#pop'), + (r'(?=\W)', Text, '#pop'), + ], + 'funcname': [ + (r'[a-zA-Z_]\w*[!?]?', Name.Function), + (r'\s+', Text), + # argument declaration + (r'(\([$@%]*\))(\s*)', bygroups(Punctuation, Text)), + (r';', Punctuation, '#pop'), + (r'.*?\{', Punctuation, '#pop'), + ], + 'cb-string': [ + (r'\\[{}\\]', String.Other), + (r'\\', String.Other), + (r'\{', String.Other, 'cb-string'), + (r'\}', String.Other, '#pop'), + (r'[^{}\\]+', String.Other) + ], + 'rb-string': [ + (r'\\[()\\]', String.Other), + (r'\\', String.Other), + (r'\(', String.Other, 'rb-string'), + (r'\)', String.Other, '#pop'), + (r'[^()]+', String.Other) + ], + 'sb-string': [ + (r'\\[\[\]\\]', String.Other), + (r'\\', String.Other), + (r'\[', String.Other, 'sb-string'), + (r'\]', String.Other, '#pop'), + (r'[^\[\]]+', String.Other) + ], + 'lt-string': [ + (r'\\[<>\\]', String.Other), + (r'\\', String.Other), + (r'\<', String.Other, 'lt-string'), + (r'\>', String.Other, '#pop'), + (r'[^<>]+', String.Other) + ], + 'end-part': [ + (r'.+', Comment.Preproc, '#pop') + ] + } + + def analyse_text(text): + if shebang_matches(text, r'perl'): + return True + if re.search(r'(?:my|our)\s+[$@%(]', text): + return 0.9 + + +class Perl6Lexer(ExtendedRegexLexer): + """ + For `Raku `_ (a.k.a. Perl 6) source code. + + .. versionadded:: 2.0 + """ + + name = 'Perl6' + aliases = ['perl6', 'pl6', 'raku'] + filenames = ['*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6', + '*.6pm', '*.p6m', '*.pm6', '*.t', '*.raku', '*.rakumod', + '*.rakutest', '*.rakudoc'] + mimetypes = ['text/x-perl6', 'application/x-perl6'] + flags = re.MULTILINE | re.DOTALL | re.UNICODE + + PERL6_IDENTIFIER_RANGE = r"['\w:-]" + + PERL6_KEYWORDS = ( + #Phasers + 'BEGIN','CATCH','CHECK','CLOSE','CONTROL','DOC','END','ENTER','FIRST', + 'INIT','KEEP','LAST','LEAVE','NEXT','POST','PRE','QUIT','UNDO', + #Keywords + 'anon','augment','but','class','constant','default','does','else', + 'elsif','enum','for','gather','given','grammar','has','if','import', + 'is','let','loop','made','make','method','module','multi','my','need', + 'orwith','our','proceed','proto','repeat','require','return', + 'return-rw','returns','role','rule','state','sub','submethod','subset', + 'succeed','supersede','token','try','unit','unless','until','use', + 'when','while','with','without', + #Traits + 'export','native','repr','required','rw','symbol', + ) + + PERL6_BUILTINS = ( + 'ACCEPTS','abs','abs2rel','absolute','accept','accessed','acos', + 'acosec','acosech','acosh','acotan','acotanh','acquire','act','action', + 'actions','add','add_attribute','add_enum_value','add_fallback', + 'add_method','add_parent','add_private_method','add_role','add_trustee', + 'adverb','after','all','allocate','allof','allowed','alternative-names', + 'annotations','antipair','antipairs','any','anyof','app_lifetime', + 'append','arch','archname','args','arity','Array','asec','asech','asin', + 'asinh','ASSIGN-KEY','ASSIGN-POS','assuming','ast','at','atan','atan2', + 'atanh','AT-KEY','atomic-assign','atomic-dec-fetch','atomic-fetch', + 'atomic-fetch-add','atomic-fetch-dec','atomic-fetch-inc', + 'atomic-fetch-sub','atomic-inc-fetch','AT-POS','attributes','auth', + 
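# Editor's aside (illustrative sketch, not part of the upstream patch): the
# analyse_text hook defined on PerlLexer above is what drives Pygments'
# lexer guessing -- a perl shebang returns True outright and a my/our
# variable declaration scores 0.9.  Exercised through the public API:
from pygments.lexers import guess_lexer

perl_source = "#!/usr/bin/perl\nmy $greeting = 'hello';\nprint $greeting;\n"
print(guess_lexer(perl_source).name)  # expected to report: Perl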
'await','backtrace','Bag','BagHash','bail-out','base','basename', + 'base-repeating','batch','BIND-KEY','BIND-POS','bind-stderr', + 'bind-stdin','bind-stdout','bind-udp','bits','bless','block','Bool', + 'bool-only','bounds','break','Bridge','broken','BUILD','build-date', + 'bytes','cache','callframe','calling-package','CALL-ME','callsame', + 'callwith','can','cancel','candidates','cando','can-ok','canonpath', + 'caps','caption','Capture','cas','catdir','categorize','categorize-list', + 'catfile','catpath','cause','ceiling','cglobal','changed','Channel', + 'chars','chdir','child','child-name','child-typename','chmod','chomp', + 'chop','chr','chrs','chunks','cis','classify','classify-list','cleanup', + 'clone','close','closed','close-stdin','cmp-ok','code','codes','collate', + 'column','comb','combinations','command','comment','compiler','Complex', + 'compose','compose_type','composer','condition','config', + 'configure_destroy','configure_type_checking','conj','connect', + 'constraints','construct','contains','contents','copy','cos','cosec', + 'cosech','cosh','cotan','cotanh','count','count-only','cpu-cores', + 'cpu-usage','CREATE','create_type','cross','cue','curdir','curupdir','d', + 'Date','DateTime','day','daycount','day-of-month','day-of-week', + 'day-of-year','days-in-month','declaration','decode','decoder','deepmap', + 'default','defined','DEFINITE','delayed','DELETE-KEY','DELETE-POS', + 'denominator','desc','DESTROY','destroyers','devnull','diag', + 'did-you-mean','die','dies-ok','dir','dirname','dir-sep','DISTROnames', + 'do','does','does-ok','done','done-testing','duckmap','dynamic','e', + 'eager','earlier','elems','emit','enclosing','encode','encoder', + 'encoding','end','ends-with','enum_from_value','enum_value_list', + 'enum_values','enums','eof','EVAL','eval-dies-ok','EVALFILE', + 'eval-lives-ok','exception','excludes-max','excludes-min','EXISTS-KEY', + 'EXISTS-POS','exit','exitcode','exp','expected','explicitly-manage', + 'expmod','extension','f','fail','fails-like','fc','feature','file', + 'filename','find_method','find_method_qualified','finish','first','flat', + 'flatmap','flip','floor','flunk','flush','fmt','format','formatter', + 'freeze','from','from-list','from-loop','from-posix','full', + 'full-barrier','get','get_value','getc','gist','got','grab','grabpairs', + 'grep','handle','handled','handles','hardware','has_accessor','Hash', + 'head','headers','hh-mm-ss','hidden','hides','hour','how','hyper','id', + 'illegal','im','in','indent','index','indices','indir','infinite', + 'infix','infix:<+>','infix:<->','install_method_cache','Instant', + 'instead','Int','int-bounds','interval','in-timezone','invalid-str', + 'invert','invocant','IO','IO::Notification.watch-path','is_trusted', + 'is_type','isa','is-absolute','isa-ok','is-approx','is-deeply', + 'is-hidden','is-initial-thread','is-int','is-lazy','is-leap-year', + 'isNaN','isnt','is-prime','is-relative','is-routine','is-setting', + 'is-win','item','iterator','join','keep','kept','KERNELnames','key', + 'keyof','keys','kill','kv','kxxv','l','lang','last','lastcall','later', + 'lazy','lc','leading','level','like','line','lines','link','List', + 'listen','live','lives-ok','local','lock','log','log10','lookup','lsb', + 'made','MAIN','make','Map','match','max','maxpairs','merge','message', + 'method','method_table','methods','migrate','min','minmax','minpairs', + 'minute','misplaced','Mix','MixHash','mkdir','mode','modified','month', + 'move','mro','msb','multi','multiness','my','name','named','named_names', + 
'narrow','nativecast','native-descriptor','nativesizeof','new','new_type', + 'new-from-daycount','new-from-pairs','next','nextcallee','next-handle', + 'nextsame','nextwith','NFC','NFD','NFKC','NFKD','nl-in','nl-out', + 'nodemap','nok','none','norm','not','note','now','nude','Num', + 'numerator','Numeric','of','offset','offset-in-hours','offset-in-minutes', + 'ok','old','on-close','one','on-switch','open','opened','operation', + 'optional','ord','ords','orig','os-error','osname','out-buffer','pack', + 'package','package-kind','package-name','packages','pair','pairs', + 'pairup','parameter','params','parent','parent-name','parents','parse', + 'parse-base','parsefile','parse-names','parts','pass','path','path-sep', + 'payload','peer-host','peer-port','periods','perl','permutations','phaser', + 'pick','pickpairs','pid','placeholder','plan','plus','polar','poll', + 'polymod','pop','pos','positional','posix','postfix','postmatch', + 'precomp-ext','precomp-target','pred','prefix','prematch','prepend', + 'print','printf','print-nl','print-to','private','private_method_table', + 'proc','produce','Promise','prompt','protect','pull-one','push', + 'push-all','push-at-least','push-exactly','push-until-lazy','put', + 'qualifier-type','quit','r','race','radix','rand','range','Rat','raw', + 're','read','readchars','readonly','ready','Real','reallocate','reals', + 'reason','rebless','receive','recv','redispatcher','redo','reduce', + 'rel2abs','relative','release','rename','repeated','replacement', + 'report','reserved','resolve','restore','result','resume','rethrow', + 'reverse','right','rindex','rmdir','role','roles_to_compose','rolish', + 'roll','rootdir','roots','rotate','rotor','round','roundrobin', + 'routine-type','run','rwx','s','samecase','samemark','samewith','say', + 'schedule-on','scheduler','scope','sec','sech','second','seek','self', + 'send','Set','set_hidden','set_name','set_package','set_rw','set_value', + 'SetHash','set-instruments','setup_finalization','shape','share','shell', + 'shift','sibling','sigil','sign','signal','signals','signature','sin', + 'sinh','sink','sink-all','skip','skip-at-least','skip-at-least-pull-one', + 'skip-one','skip-rest','sleep','sleep-timer','sleep-until','Slip','slurp', + 'slurp-rest','slurpy','snap','snapper','so','socket-host','socket-port', + 'sort','source','source-package','spawn','SPEC','splice','split', + 'splitdir','splitpath','sprintf','spurt','sqrt','squish','srand','stable', + 'start','started','starts-with','status','stderr','stdout','Str', + 'sub_signature','subbuf','subbuf-rw','subname','subparse','subst', + 'subst-mutate','substr','substr-eq','substr-rw','subtest','succ','sum', + 'Supply','symlink','t','tail','take','take-rw','tan','tanh','tap', + 'target','target-name','tc','tclc','tell','then','throttle','throw', + 'throws-like','timezone','tmpdir','to','today','todo','toggle','to-posix', + 'total','trailing','trans','tree','trim','trim-leading','trim-trailing', + 'truncate','truncated-to','trusts','try_acquire','trying','twigil','type', + 'type_captures','typename','uc','udp','uncaught_handler','unimatch', + 'uniname','uninames','uniparse','uniprop','uniprops','unique','unival', + 'univals','unlike','unlink','unlock','unpack','unpolar','unshift', + 'unwrap','updir','USAGE','use-ok','utc','val','value','values','VAR', + 'variable','verbose-config','version','VMnames','volume','vow','w','wait', + 'warn','watch','watch-path','week','weekday-of-month','week-number', + 'week-year','WHAT','when','WHERE','WHEREFORE','WHICH','WHO', + 
'whole-second','WHY','wordcase','words','workaround','wrap','write', + 'write-to','x','yada','year','yield','yyyy-mm-dd','z','zip','zip-latest', + + ) + + PERL6_BUILTIN_CLASSES = ( + #Booleans + 'False','True', + #Classes + 'Any','Array','Associative','AST','atomicint','Attribute','Backtrace', + 'Backtrace::Frame','Bag','Baggy','BagHash','Blob','Block','Bool','Buf', + 'Callable','CallFrame','Cancellation','Capture','CArray','Channel','Code', + 'compiler','Complex','ComplexStr','Cool','CurrentThreadScheduler', + 'Cursor','Date','Dateish','DateTime','Distro','Duration','Encoding', + 'Exception','Failure','FatRat','Grammar','Hash','HyperWhatever','Instant', + 'Int','int16','int32','int64','int8','IntStr','IO','IO::ArgFiles', + 'IO::CatHandle','IO::Handle','IO::Notification','IO::Path', + 'IO::Path::Cygwin','IO::Path::QNX','IO::Path::Unix','IO::Path::Win32', + 'IO::Pipe','IO::Socket','IO::Socket::Async','IO::Socket::INET','IO::Spec', + 'IO::Spec::Cygwin','IO::Spec::QNX','IO::Spec::Unix','IO::Spec::Win32', + 'IO::Special','Iterable','Iterator','Junction','Kernel','Label','List', + 'Lock','Lock::Async','long','longlong','Macro','Map','Match', + 'Metamodel::AttributeContainer','Metamodel::C3MRO','Metamodel::ClassHOW', + 'Metamodel::EnumHOW','Metamodel::Finalization','Metamodel::MethodContainer', + 'Metamodel::MROBasedMethodDispatch','Metamodel::MultipleInheritance', + 'Metamodel::Naming','Metamodel::Primitives','Metamodel::PrivateMethodContainer', + 'Metamodel::RoleContainer','Metamodel::Trusting','Method','Mix','MixHash', + 'Mixy','Mu','NFC','NFD','NFKC','NFKD','Nil','Num','num32','num64', + 'Numeric','NumStr','ObjAt','Order','Pair','Parameter','Perl','Pod::Block', + 'Pod::Block::Code','Pod::Block::Comment','Pod::Block::Declarator', + 'Pod::Block::Named','Pod::Block::Para','Pod::Block::Table','Pod::Heading', + 'Pod::Item','Pointer','Positional','PositionalBindFailover','Proc', + 'Proc::Async','Promise','Proxy','PseudoStash','QuantHash','Range','Rat', + 'Rational','RatStr','Real','Regex','Routine','Scalar','Scheduler', + 'Semaphore','Seq','Set','SetHash','Setty','Signature','size_t','Slip', + 'Stash','Str','StrDistance','Stringy','Sub','Submethod','Supplier', + 'Supplier::Preserving','Supply','Systemic','Tap','Telemetry', + 'Telemetry::Instrument::Thread','Telemetry::Instrument::Usage', + 'Telemetry::Period','Telemetry::Sampler','Thread','ThreadPoolScheduler', + 'UInt','uint16','uint32','uint64','uint8','Uni','utf8','Variable', + 'Version','VM','Whatever','WhateverCode','WrapHandle' + ) + + PERL6_OPERATORS = ( + 'X', 'Z', 'after', 'also', 'and', 'andthen', 'before', 'cmp', 'div', + 'eq', 'eqv', 'extra', 'ff', 'fff', 'ge', 'gt', 'le', 'leg', 'lt', 'm', + 'mm', 'mod', 'ne', 'or', 'orelse', 'rx', 's', 'tr', 'x', 'xor', 'xx', + '++', '--', '**', '!', '+', '-', '~', '?', '|', '||', '+^', '~^', '?^', + '^', '*', '/', '%', '%%', '+&', '+<', '+>', '~&', '~<', '~>', '?&', + 'gcd', 'lcm', '+', '-', '+|', '+^', '~|', '~^', '?|', '?^', + '~', '&', '^', 'but', 'does', '<=>', '..', '..^', '^..', '^..^', + '!=', '==', '<', '<=', '>', '>=', '~~', '===', '!eqv', + '&&', '||', '^^', '//', 'min', 'max', '??', '!!', 'ff', 'fff', 'so', + 'not', '<==', '==>', '<<==', '==>>','unicmp', + ) + + # Perl 6 has a *lot* of possible bracketing characters + # this list was lifted from STD.pm6 (https://github.com/perl6/std) + PERL6_BRACKETS = { + '\u0028': '\u0029', '\u003c': '\u003e', '\u005b': '\u005d', + '\u007b': '\u007d', '\u00ab': '\u00bb', '\u0f3a': '\u0f3b', + '\u0f3c': '\u0f3d', '\u169b': '\u169c', '\u2018': '\u2019', + 
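# Editor's aside (illustrative sketch, not part of the upstream patch): word
# lists like PERL6_KEYWORDS and PERL6_OPERATORS above are compiled into one
# alternation regex by the `_build_word_match` helper defined further down,
# with lookaround guards built from PERL6_IDENTIFIER_RANGE so that e.g.
# 'class' matches but 'subclass' does not.  Equivalent standalone logic
# (simplified: no prefix/suffix arguments; `build_word_match` is an invented
# name for the example):
import re

def build_word_match(words, boundary):
    # (?<!...) / (?!...) keep the match from starting or ending inside a
    # longer identifier made of `boundary` characters.
    return (r'(?<!' + boundary + r')(' +
            '|'.join(re.escape(w) for w in words) +
            r')(?!' + boundary + r')')

pattern = build_word_match(('class', 'role'), r"['\w:-]")
print(bool(re.search(pattern, 'class Foo')))     # True
print(bool(re.search(pattern, 'subclass Foo')))  # False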
'\u201a': '\u2019', '\u201b': '\u2019', '\u201c': '\u201d', + '\u201e': '\u201d', '\u201f': '\u201d', '\u2039': '\u203a', + '\u2045': '\u2046', '\u207d': '\u207e', '\u208d': '\u208e', + '\u2208': '\u220b', '\u2209': '\u220c', '\u220a': '\u220d', + '\u2215': '\u29f5', '\u223c': '\u223d', '\u2243': '\u22cd', + '\u2252': '\u2253', '\u2254': '\u2255', '\u2264': '\u2265', + '\u2266': '\u2267', '\u2268': '\u2269', '\u226a': '\u226b', + '\u226e': '\u226f', '\u2270': '\u2271', '\u2272': '\u2273', + '\u2274': '\u2275', '\u2276': '\u2277', '\u2278': '\u2279', + '\u227a': '\u227b', '\u227c': '\u227d', '\u227e': '\u227f', + '\u2280': '\u2281', '\u2282': '\u2283', '\u2284': '\u2285', + '\u2286': '\u2287', '\u2288': '\u2289', '\u228a': '\u228b', + '\u228f': '\u2290', '\u2291': '\u2292', '\u2298': '\u29b8', + '\u22a2': '\u22a3', '\u22a6': '\u2ade', '\u22a8': '\u2ae4', + '\u22a9': '\u2ae3', '\u22ab': '\u2ae5', '\u22b0': '\u22b1', + '\u22b2': '\u22b3', '\u22b4': '\u22b5', '\u22b6': '\u22b7', + '\u22c9': '\u22ca', '\u22cb': '\u22cc', '\u22d0': '\u22d1', + '\u22d6': '\u22d7', '\u22d8': '\u22d9', '\u22da': '\u22db', + '\u22dc': '\u22dd', '\u22de': '\u22df', '\u22e0': '\u22e1', + '\u22e2': '\u22e3', '\u22e4': '\u22e5', '\u22e6': '\u22e7', + '\u22e8': '\u22e9', '\u22ea': '\u22eb', '\u22ec': '\u22ed', + '\u22f0': '\u22f1', '\u22f2': '\u22fa', '\u22f3': '\u22fb', + '\u22f4': '\u22fc', '\u22f6': '\u22fd', '\u22f7': '\u22fe', + '\u2308': '\u2309', '\u230a': '\u230b', '\u2329': '\u232a', + '\u23b4': '\u23b5', '\u2768': '\u2769', '\u276a': '\u276b', + '\u276c': '\u276d', '\u276e': '\u276f', '\u2770': '\u2771', + '\u2772': '\u2773', '\u2774': '\u2775', '\u27c3': '\u27c4', + '\u27c5': '\u27c6', '\u27d5': '\u27d6', '\u27dd': '\u27de', + '\u27e2': '\u27e3', '\u27e4': '\u27e5', '\u27e6': '\u27e7', + '\u27e8': '\u27e9', '\u27ea': '\u27eb', '\u2983': '\u2984', + '\u2985': '\u2986', '\u2987': '\u2988', '\u2989': '\u298a', + '\u298b': '\u298c', '\u298d': '\u298e', '\u298f': '\u2990', + '\u2991': '\u2992', '\u2993': '\u2994', '\u2995': '\u2996', + '\u2997': '\u2998', '\u29c0': '\u29c1', '\u29c4': '\u29c5', + '\u29cf': '\u29d0', '\u29d1': '\u29d2', '\u29d4': '\u29d5', + '\u29d8': '\u29d9', '\u29da': '\u29db', '\u29f8': '\u29f9', + '\u29fc': '\u29fd', '\u2a2b': '\u2a2c', '\u2a2d': '\u2a2e', + '\u2a34': '\u2a35', '\u2a3c': '\u2a3d', '\u2a64': '\u2a65', + '\u2a79': '\u2a7a', '\u2a7d': '\u2a7e', '\u2a7f': '\u2a80', + '\u2a81': '\u2a82', '\u2a83': '\u2a84', '\u2a8b': '\u2a8c', + '\u2a91': '\u2a92', '\u2a93': '\u2a94', '\u2a95': '\u2a96', + '\u2a97': '\u2a98', '\u2a99': '\u2a9a', '\u2a9b': '\u2a9c', + '\u2aa1': '\u2aa2', '\u2aa6': '\u2aa7', '\u2aa8': '\u2aa9', + '\u2aaa': '\u2aab', '\u2aac': '\u2aad', '\u2aaf': '\u2ab0', + '\u2ab3': '\u2ab4', '\u2abb': '\u2abc', '\u2abd': '\u2abe', + '\u2abf': '\u2ac0', '\u2ac1': '\u2ac2', '\u2ac3': '\u2ac4', + '\u2ac5': '\u2ac6', '\u2acd': '\u2ace', '\u2acf': '\u2ad0', + '\u2ad1': '\u2ad2', '\u2ad3': '\u2ad4', '\u2ad5': '\u2ad6', + '\u2aec': '\u2aed', '\u2af7': '\u2af8', '\u2af9': '\u2afa', + '\u2e02': '\u2e03', '\u2e04': '\u2e05', '\u2e09': '\u2e0a', + '\u2e0c': '\u2e0d', '\u2e1c': '\u2e1d', '\u2e20': '\u2e21', + '\u3008': '\u3009', '\u300a': '\u300b', '\u300c': '\u300d', + '\u300e': '\u300f', '\u3010': '\u3011', '\u3014': '\u3015', + '\u3016': '\u3017', '\u3018': '\u3019', '\u301a': '\u301b', + '\u301d': '\u301e', '\ufd3e': '\ufd3f', '\ufe17': '\ufe18', + '\ufe35': '\ufe36', '\ufe37': '\ufe38', '\ufe39': '\ufe3a', + '\ufe3b': '\ufe3c', '\ufe3d': '\ufe3e', '\ufe3f': '\ufe40', + '\ufe41': '\ufe42', 
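# Editor's aside (illustrative sketch, not part of the upstream patch): the
# core of the nested-delimiter scan that `brackets_callback` below performs
# with repeated find() calls, reduced to a standalone helper
# (`scan_balanced` is an invented name); PERL6_BRACKETS above supplies the
# opener/closer pairs.
def scan_balanced(text, opener, closer, pos):
    """Return the index just past the closer matching an already-consumed
    opener, scanning from `pos` and honouring nesting."""
    depth = 1
    while depth > 0 and pos < len(text):
        if text[pos] == opener:
            depth += 1
        elif text[pos] == closer:
            depth -= 1
        pos += 1
    return pos  # len(text) if no balancing closer was found

# q{a{b}c} -- starting after the first '{' (index 2), the scan stops just
# past the brace that balances it:
assert scan_balanced('q{a{b}c}', '{', '}', 2) == 8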
'\ufe43': '\ufe44', '\ufe47': '\ufe48', + '\ufe59': '\ufe5a', '\ufe5b': '\ufe5c', '\ufe5d': '\ufe5e', + '\uff08': '\uff09', '\uff1c': '\uff1e', '\uff3b': '\uff3d', + '\uff5b': '\uff5d', '\uff5f': '\uff60', '\uff62': '\uff63', + } + + def _build_word_match(words, boundary_regex_fragment=None, prefix='', suffix=''): + if boundary_regex_fragment is None: + return r'\b(' + prefix + r'|'.join(re.escape(x) for x in words) + \ + suffix + r')\b' + else: + return r'(?<!' + boundary_regex_fragment + r')' + prefix + r'(' + \ + r'|'.join(re.escape(x) for x in words) + r')' + suffix + r'(?!' + \ + boundary_regex_fragment + r')' + + def brackets_callback(token_class): + def callback(lexer, match, context): + groups = match.groupdict() + opening_chars = groups['delimiter'] + n_chars = len(opening_chars) + adverbs = groups.get('adverbs') + + closer = Perl6Lexer.PERL6_BRACKETS.get(opening_chars[0]) + text = context.text + + if closer is None: # it's not a mirrored character, which means we + # just need to look for the next occurrence + + end_pos = text.find(opening_chars, match.start('delimiter') + n_chars) + else: # we need to look for the corresponding closing character, + # keep nesting in mind + closing_chars = closer * n_chars + nesting_level = 1 + + search_pos = match.start('delimiter') + + while nesting_level > 0: + next_open_pos = text.find(opening_chars, search_pos + n_chars) + next_close_pos = text.find(closing_chars, search_pos + n_chars) + + if next_close_pos == -1: + next_close_pos = len(text) + nesting_level = 0 + elif next_open_pos != -1 and next_open_pos < next_close_pos: + nesting_level += 1 + search_pos = next_open_pos + else: # next_close_pos < next_open_pos + nesting_level -= 1 + search_pos = next_close_pos + + end_pos = next_close_pos + + if end_pos < 0: # if we didn't find a closer, just highlight the + # rest of the text in this class + end_pos = len(text) + + if adverbs is not None and re.search(r':to\b', adverbs): + heredoc_terminator = text[match.start('delimiter') + n_chars:end_pos] + end_heredoc = re.search(r'^\s*' + re.escape(heredoc_terminator) + + r'\s*$', text[end_pos:], re.MULTILINE) + + if end_heredoc: + end_pos += end_heredoc.end() + else: + end_pos = len(text) + + yield match.start(), token_class, text[match.start():end_pos + n_chars] + context.pos = end_pos + n_chars + + return callback + + def opening_brace_callback(lexer, match, context): + stack = context.stack + + yield match.start(), Text, context.text[match.start():match.end()] + context.pos = match.end() + + # if we encounter an opening brace and we're one level + # below a token state, it means we need to increment + # the nesting level for braces so we know later when + # we should return to the token rules. + if len(stack) > 2 and stack[-2] == 'token': + context.perl6_token_nesting_level += 1 + + def closing_brace_callback(lexer, match, context): + stack = context.stack + + yield match.start(), Text, context.text[match.start():match.end()] + context.pos = match.end() + + # if we encounter a free closing brace and we're one level + # below a token state, it means we need to check the nesting + # level to see if we need to return to the token state. + if len(stack) > 2 and stack[-2] == 'token': + context.perl6_token_nesting_level -= 1 + if context.perl6_token_nesting_level == 0: + stack.pop() + + def embedded_perl6_callback(lexer, match, context): + context.perl6_token_nesting_level = 1 + yield match.start(), Text, context.text[match.start():match.end()] + context.pos = match.end() + context.stack.append('root') + + # If you're modifying these rules, be careful if you need to process '{' or '}' + # characters. We have special logic for processing these characters (due to the fact + # that you can nest Perl 6 code in regex blocks), so if you need to process one of + # them, make sure you also process the corresponding one! + tokens = { + 'common': [ + (r'#[`|=](?P<delimiter>(?P<first_char>[' + ''.join(PERL6_BRACKETS) + r'])(?P=first_char)*)', + brackets_callback(Comment.Multiline)), + (r'#[^\n]*$', Comment.Single), + (r'^(\s*)=begin\s+(\w+)\b.*?^\1=end\s+\2', Comment.Multiline), + (r'^(\s*)=for.*?\n\s*?\n', Comment.Multiline), + (r'^=.*?\n\s*?\n', Comment.Multiline), + (r'(regex|token|rule)(\s*' + PERL6_IDENTIFIER_RANGE + '+:sym)', + bygroups(Keyword, Name), 'token-sym-brackets'), + (r'(regex|token|rule)(?!'
+ PERL6_IDENTIFIER_RANGE + r')(\s*' + PERL6_IDENTIFIER_RANGE + '+)?', + bygroups(Keyword, Name), 'pre-token'), + # deal with a special case in the Perl 6 grammar (role q { ... }) + (r'(role)(\s+)(q)(\s*)', bygroups(Keyword, Text, Name, Text)), + (_build_word_match(PERL6_KEYWORDS, PERL6_IDENTIFIER_RANGE), Keyword), + (_build_word_match(PERL6_BUILTIN_CLASSES, PERL6_IDENTIFIER_RANGE, suffix='(?::[UD])?'), + Name.Builtin), + (_build_word_match(PERL6_BUILTINS, PERL6_IDENTIFIER_RANGE), Name.Builtin), + # copied from PerlLexer + (r'[$@%&][.^:?=!~]?' + PERL6_IDENTIFIER_RANGE + '+(?:<<.*?>>|<.*?>|«.*?»)*', + Name.Variable), + (r'\$[!/](?:<<.*?>>|<.*?>|«.*?»)*', Name.Variable.Global), + (r'::\?\w+', Name.Variable.Global), + (r'[$@%&]\*' + PERL6_IDENTIFIER_RANGE + '+(?:<<.*?>>|<.*?>|«.*?»)*', + Name.Variable.Global), + (r'\$(?:<.*?>)+', Name.Variable), + (r'(?:q|qq|Q)[a-zA-Z]?\s*(?P<adverbs>:[\w\s:]+)?\s*(?P<delimiter>(?P<first_char>[^0-9a-zA-Z:\s])' + r'(?P=first_char)*)', brackets_callback(String)), + # copied from PerlLexer + (r'0_?[0-7]+(_[0-7]+)*', Number.Oct), + (r'0x[0-9A-Fa-f]+(_[0-9A-Fa-f]+)*', Number.Hex), + (r'0b[01]+(_[01]+)*', Number.Bin), + (r'(?i)(\d*(_\d*)*\.\d+(_\d*)*|\d+(_\d*)*\.\d+(_\d*)*)(e[+-]?\d+)?', + Number.Float), + (r'(?i)\d+(_\d*)*e[+-]?\d+(_\d*)*', Number.Float), + (r'\d+(_\d+)*', Number.Integer), + (r'(?<=~~)\s*/(?:\\\\|\\/|.)*?/', String.Regex), + (r'(?<=[=(,])\s*/(?:\\\\|\\/|.)*?/', String.Regex), + (r'm\w+(?=\()', Name), + (r'(?:m|ms|rx)\s*(?P<adverbs>:[\w\s:]+)?\s*(?P<delimiter>(?P<first_char>[^\w:\s])' + r'(?P=first_char)*)', brackets_callback(String.Regex)), + (r'(?:s|ss|tr)\s*(?::[\w\s:]+)?\s*/(?:\\\\|\\/|.)*?/(?:\\\\|\\/|.)*?/', + String.Regex), + (r'<[^\s=].*?\S>', String), + (_build_word_match(PERL6_OPERATORS), Operator), + (r'\w' + PERL6_IDENTIFIER_RANGE + '*', Name), + (r"'(\\\\|\\[^\\]|[^'\\])*'", String), + (r'"(\\\\|\\[^\\]|[^"\\])*"', String), + ], + 'root': [ + include('common'), + (r'\{', opening_brace_callback), + (r'\}', closing_brace_callback), + (r'.+?', Text), + ], + 'pre-token': [ + include('common'), + (r'\{', Text, ('#pop', 'token')), + (r'.+?', Text), + ], + 'token-sym-brackets': [ + (r'(?P<delimiter>(?P<first_char>[' + ''.join(PERL6_BRACKETS) + '])(?P=first_char)*)', + brackets_callback(Name), ('#pop', 'pre-token')), + default(('#pop', 'pre-token')), + ], + 'token': [ + (r'\}', Text, '#pop'), + (r'(?<=:)(?:my|our|state|constant|temp|let).*?;', using(this)), + # make sure that quotes in character classes aren't treated as strings + (r'<(?:[-!?+.]\s*)?\[.*?\]>', String.Regex), + # make sure that '#' characters in quotes aren't treated as comments + (r"(?<!\\)'(\\\\|\\[^\\]|[^'\\])*'", String.Regex), + (r'(?<!\\)"(\\\\|\\[^\\]|[^"\\])*"', String.Regex), + (r'#.*?$', Comment.Single), + (r'\{', embedded_perl6_callback), + ('.+?', String.Regex), + ], + } + + def analyse_text(text): + def strip_pod(lines): + in_pod = False + stripped_lines = [] + + for line in lines: + if re.match(r'^=(?:end|cut)', line): + in_pod = False + elif re.match(r'^=\w+', line): + in_pod = True + elif not in_pod: + stripped_lines.append(line) + + return stripped_lines + + # XXX handle block comments + lines = text.splitlines() + lines = strip_pod(lines) + text = '\n'.join(lines) + + if shebang_matches(text, r'perl6|rakudo|niecza|pugs'): + return True + + saw_perl_decl = False + rating = False + + # check for my/our/has declarations + if re.search(r"(?:my|our|has)\s+(?:" + Perl6Lexer.PERL6_IDENTIFIER_RANGE + + r"+\s+)?[$@%&(]", text): + rating = 0.8 + saw_perl_decl = True + + for line in lines[:25]: + line = line.strip() + if not line: + continue + + # match v6; use v6; use v6.0; use v6.0.0; + if re.match(r'^\s*(?:use\s+)?v6(?:\.\d(?:\.\d)?)?;', line): + return True + # match class, module, role, enum, grammar declarations + class_decl = re.match(r'^\s*(?:(?P<scope>my|our)\s+)?(?:module|class|role|enum|grammar)', line) + if class_decl: + if saw_perl_decl or class_decl.group('scope') is not None: + return True + rating = 0.05 + continue + break + + return rating + + def __init__(self, **options): + super().__init__(**options) + self.encoding = options.get('encoding', 'utf-8') diff --git a/pygments/lexers/php.py b/pygments/lexers/php.py old mode 100644 new mode 100755 index 4f06d21..1a2ddd4 --- a/pygments/lexers/php.py +++ b/pygments/lexers/php.py @@ -1,270 +1,321 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.php - ~~~~~~~~~~~~~~~~~~~ - - Lexers for PHP and related languages. - - :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details.
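# Editor's aside (illustrative sketch, not part of the upstream patch):
# Perl6Lexer.analyse_text above strips POD, then looks for shebangs,
# `use v6;`-style pragmas and my/our/has declarations.  Exercised through
# the public guessing API:
from pygments.lexers import guess_lexer

raku_source = "use v6;\nmy $answer = 42;\nsay $answer;\n"
print(guess_lexer(raku_source).name)  # expected to report: Perl6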
-""" - -import re - -from pygments.lexer import RegexLexer, include, bygroups, default, using, \ - this, words -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ - Number, Punctuation, Other -from pygments.util import get_bool_opt, get_list_opt, shebang_matches - -__all__ = ['ZephirLexer', 'PhpLexer'] - - -class ZephirLexer(RegexLexer): - """ - For `Zephir language `_ source code. - - Zephir is a compiled high level language aimed - to the creation of C-extensions for PHP. - - .. versionadded:: 2.0 - """ - - name = 'Zephir' - aliases = ['zephir'] - filenames = ['*.zep'] - - zephir_keywords = ['fetch', 'echo', 'isset', 'empty'] - zephir_type = ['bit', 'bits', 'string'] - - flags = re.DOTALL | re.MULTILINE - - tokens = { - 'commentsandwhitespace': [ - (r'\s+', Text), - (r'//.*?\n', Comment.Single), - (r'/\*.*?\*/', Comment.Multiline) - ], - 'slashstartsregex': [ - include('commentsandwhitespace'), - (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/' - r'([gim]+\b|\B)', String.Regex, '#pop'), - (r'/', Operator, '#pop'), - default('#pop') - ], - 'badregex': [ - (r'\n', Text, '#pop') - ], - 'root': [ - (r'^(?=\s|/)', Text, 'slashstartsregex'), - include('commentsandwhitespace'), - (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|' - r'(<<|>>>?|==?|!=?|->|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'), - (r'[{(\[;,]', Punctuation, 'slashstartsregex'), - (r'[})\].]', Punctuation), - (r'(for|in|while|do|break|return|continue|switch|case|default|if|else|loop|' - r'require|inline|throw|try|catch|finally|new|delete|typeof|instanceof|void|' - r'namespace|use|extends|this|fetch|isset|unset|echo|fetch|likely|unlikely|' - r'empty)\b', Keyword, 'slashstartsregex'), - (r'(var|let|with|function)\b', Keyword.Declaration, 'slashstartsregex'), - (r'(abstract|boolean|bool|char|class|const|double|enum|export|extends|final|' - r'native|goto|implements|import|int|string|interface|long|ulong|char|uchar|' - r'float|unsigned|private|protected|public|short|static|self|throws|reverse|' - r'transient|volatile)\b', Keyword.Reserved), - (r'(true|false|null|undefined)\b', Keyword.Constant), - (r'(Array|Boolean|Date|_REQUEST|_COOKIE|_SESSION|' - r'_GET|_POST|_SERVER|this|stdClass|range|count|iterator|' - r'window)\b', Name.Builtin), - (r'[$a-zA-Z_][\w\\]*', Name.Other), - (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), - (r'0x[0-9a-fA-F]+', Number.Hex), - (r'[0-9]+', Number.Integer), - (r'"(\\\\|\\"|[^"])*"', String.Double), - (r"'(\\\\|\\'|[^'])*'", String.Single), - ] - } - - -class PhpLexer(RegexLexer): - """ - For `PHP `_ source code. - For PHP embedded in HTML, use the `HtmlPhpLexer`. - - Additional options accepted: - - `startinline` - If given and ``True`` the lexer starts highlighting with - php code (i.e.: no starting ``>> from pygments.lexers._php_builtins import MODULES - >>> MODULES.keys() - ['PHP Options/Info', 'Zip', 'dba', ...] - - In fact the names of those modules match the module names from - the php documentation. 
- """ - - name = 'PHP' - aliases = ['php', 'php3', 'php4', 'php5'] - filenames = ['*.php', '*.php[345]', '*.inc'] - mimetypes = ['text/x-php'] - - # Note that a backslash is included in the following two patterns - # PHP uses a backslash as a namespace separator - _ident_char = r'[\\\w]|[^\x00-\x7f]' - _ident_begin = r'(?:[\\_a-z]|[^\x00-\x7f])' - _ident_end = r'(?:' + _ident_char + ')*' - _ident_inner = _ident_begin + _ident_end - - flags = re.IGNORECASE | re.DOTALL | re.MULTILINE - tokens = { - 'root': [ - (r'<\?(php)?', Comment.Preproc, 'php'), - (r'[^<]+', Other), - (r'<', Other) - ], - 'php': [ - (r'\?>', Comment.Preproc, '#pop'), - (r'(<<<)([\'"]?)(' + _ident_inner + r')(\2\n.*?\n\s*)(\3)(;?)(\n)', - bygroups(String, String, String.Delimiter, String, String.Delimiter, - Punctuation, Text)), - (r'\s+', Text), - (r'#.*?\n', Comment.Single), - (r'//.*?\n', Comment.Single), - # put the empty comment here, it is otherwise seen as - # the start of a docstring - (r'/\*\*/', Comment.Multiline), - (r'/\*\*.*?\*/', String.Doc), - (r'/\*.*?\*/', Comment.Multiline), - (r'(->|::)(\s*)(' + _ident_inner + ')', - bygroups(Operator, Text, Name.Attribute)), - (r'[~!%^&*+=|:.<>/@-]+', Operator), - (r'\?', Operator), # don't add to the charclass above! - (r'[\[\]{}();,]+', Punctuation), - (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'), - (r'(function)(\s*)(?=\()', bygroups(Keyword, Text)), - (r'(function)(\s+)(&?)(\s*)', - bygroups(Keyword, Text, Operator, Text), 'functionname'), - (r'(const)(\s+)(' + _ident_inner + ')', - bygroups(Keyword, Text, Name.Constant)), - (r'(and|E_PARSE|old_function|E_ERROR|or|as|E_WARNING|parent|' - r'eval|PHP_OS|break|exit|case|extends|PHP_VERSION|cfunction|' - r'FALSE|print|for|require|continue|foreach|require_once|' - r'declare|return|default|static|do|switch|die|stdClass|' - r'echo|else|TRUE|elseif|var|empty|if|xor|enddeclare|include|' - r'virtual|endfor|include_once|while|endforeach|global|' - r'endif|list|endswitch|new|endwhile|not|' - r'array|E_ALL|NULL|final|php_user_filter|interface|' - r'implements|public|private|protected|abstract|clone|try|' - r'catch|throw|this|use|namespace|trait|yield|' - r'finally)\b', Keyword), - (r'(true|false|null)\b', Keyword.Constant), - include('magicconstants'), - (r'\$\{\$+' + _ident_inner + r'\}', Name.Variable), - (r'\$+' + _ident_inner, Name.Variable), - (_ident_inner, Name.Other), - (r'(\d+\.\d*|\d*\.\d+)(e[+-]?[0-9]+)?', Number.Float), - (r'\d+e[+-]?[0-9]+', Number.Float), - (r'0[0-7]+', Number.Oct), - (r'0x[a-f0-9]+', Number.Hex), - (r'\d+', Number.Integer), - (r'0b[01]+', Number.Bin), - (r"'([^'\\]*(?:\\.[^'\\]*)*)'", String.Single), - (r'`([^`\\]*(?:\\.[^`\\]*)*)`', String.Backtick), - (r'"', String.Double, 'string'), - ], - 'magicfuncs': [ - # source: http://php.net/manual/en/language.oop5.magic.php - (words(( - '__construct', '__destruct', '__call', '__callStatic', '__get', '__set', - '__isset', '__unset', '__sleep', '__wakeup', '__toString', '__invoke', - '__set_state', '__clone', '__debugInfo',), suffix=r'\b'), - Name.Function.Magic), - ], - 'magicconstants': [ - # source: http://php.net/manual/en/language.constants.predefined.php - (words(( - '__LINE__', '__FILE__', '__DIR__', '__FUNCTION__', '__CLASS__', - '__TRAIT__', '__METHOD__', '__NAMESPACE__',), - suffix=r'\b'), - Name.Constant), - ], - 'classname': [ - (_ident_inner, Name.Class, '#pop') - ], - 'functionname': [ - include('magicfuncs'), - (_ident_inner, Name.Function, '#pop'), - default('#pop') - ], - 'string': [ - (r'"', String.Double, '#pop'), - 
(r'[^{$"\\]+', String.Double), - (r'\\([nrt"$\\]|[0-7]{1,3}|x[0-9a-f]{1,2})', String.Escape), - (r'\$' + _ident_inner + r'(\[\S+?\]|->' + _ident_inner + ')?', - String.Interpol), - (r'(\{\$\{)(.*?)(\}\})', - bygroups(String.Interpol, using(this, _startinline=True), - String.Interpol)), - (r'(\{)(\$.*?)(\})', - bygroups(String.Interpol, using(this, _startinline=True), - String.Interpol)), - (r'(\$\{)(\S+)(\})', - bygroups(String.Interpol, Name.Variable, String.Interpol)), - (r'[${\\]', String.Double) - ], - } - - def __init__(self, **options): - self.funcnamehighlighting = get_bool_opt( - options, 'funcnamehighlighting', True) - self.disabledmodules = get_list_opt( - options, 'disabledmodules', ['unknown']) - self.startinline = get_bool_opt(options, 'startinline', False) - - # private option argument for the lexer itself - if '_startinline' in options: - self.startinline = options.pop('_startinline') - - # collect activated functions in a set - self._functions = set() - if self.funcnamehighlighting: - from pygments.lexers._php_builtins import MODULES - for key, value in MODULES.items(): - if key not in self.disabledmodules: - self._functions.update(value) - RegexLexer.__init__(self, **options) - - def get_tokens_unprocessed(self, text): - stack = ['root'] - if self.startinline: - stack.append('php') - for index, token, value in \ - RegexLexer.get_tokens_unprocessed(self, text, stack): - if token is Name.Other: - if value in self._functions: - yield index, Name.Builtin, value - continue - yield index, token, value - - def analyse_text(text): - if shebang_matches(text, r'php'): - return True - rv = 0.0 - if re.search(r'<\?(?!xml)', text): - rv += 0.3 - return rv +# -*- coding: utf-8 -*- +""" + pygments.lexers.php + ~~~~~~~~~~~~~~~~~~~ + + Lexers for PHP and related languages. + + :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import Lexer, RegexLexer, include, bygroups, default, \ + using, this, words, do_insertions +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Other, Generic +from pygments.util import get_bool_opt, get_list_opt, shebang_matches + +__all__ = ['ZephirLexer', 'PsyshConsoleLexer', 'PhpLexer'] + +line_re = re.compile('.*?\n') + + +class ZephirLexer(RegexLexer): + """ + For `Zephir language `_ source code. + + Zephir is a compiled high level language aimed + to the creation of C-extensions for PHP. + + .. 
versionadded:: 2.0 + """ + + name = 'Zephir' + aliases = ['zephir'] + filenames = ['*.zep'] + + zephir_keywords = ['fetch', 'echo', 'isset', 'empty'] + zephir_type = ['bit', 'bits', 'string'] + + flags = re.DOTALL | re.MULTILINE + + tokens = { + 'commentsandwhitespace': [ + (r'\s+', Text), + (r'//.*?\n', Comment.Single), + (r'/\*.*?\*/', Comment.Multiline) + ], + 'slashstartsregex': [ + include('commentsandwhitespace'), + (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/' + r'([gim]+\b|\B)', String.Regex, '#pop'), + (r'/', Operator, '#pop'), + default('#pop') + ], + 'badregex': [ + (r'\n', Text, '#pop') + ], + 'root': [ + (r'^(?=\s|/)', Text, 'slashstartsregex'), + include('commentsandwhitespace'), + (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|' + r'(<<|>>>?|==?|!=?|->|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'), + (r'[{(\[;,]', Punctuation, 'slashstartsregex'), + (r'[})\].]', Punctuation), + (r'(for|in|while|do|break|return|continue|switch|case|default|if|else|loop|' + r'require|inline|throw|try|catch|finally|new|delete|typeof|instanceof|void|' + r'namespace|use|extends|this|fetch|isset|unset|echo|fetch|likely|unlikely|' + r'empty)\b', Keyword, 'slashstartsregex'), + (r'(var|let|with|function)\b', Keyword.Declaration, 'slashstartsregex'), + (r'(abstract|boolean|bool|char|class|const|double|enum|export|extends|final|' + r'native|goto|implements|import|int|string|interface|long|ulong|char|uchar|' + r'float|unsigned|private|protected|public|short|static|self|throws|reverse|' + r'transient|volatile)\b', Keyword.Reserved), + (r'(true|false|null|undefined)\b', Keyword.Constant), + (r'(Array|Boolean|Date|_REQUEST|_COOKIE|_SESSION|' + r'_GET|_POST|_SERVER|this|stdClass|range|count|iterator|' + r'window)\b', Name.Builtin), + (r'[$a-zA-Z_][\w\\]*', Name.Other), + (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), + (r'0x[0-9a-fA-F]+', Number.Hex), + (r'[0-9]+', Number.Integer), + (r'"(\\\\|\\"|[^"])*"', String.Double), + (r"'(\\\\|\\'|[^'])*'", String.Single), + ] + } + + +class PsyshConsoleLexer(Lexer): + """ + For `PsySH`_ console output, such as: + + .. sourcecode:: psysh + + >>> $greeting = function($name): string { + ... return "Hello, {$name}"; + ... }; + => Closure($name): string {#2371 …3} + >>> $greeting('World') + => "Hello, World" + + .. _PsySH: https://psysh.org/ + .. versionadded:: 2.7 + """ + name = 'PsySH console session for PHP' + aliases = ['psysh'] + + def __init__(self, **options): + options['startinline'] = True + Lexer.__init__(self, **options) + + def get_tokens_unprocessed(self, text): + phplexer = PhpLexer(**self.options) + curcode = '' + insertions = [] + for match in line_re.finditer(text): + line = match.group() + if line.startswith('>>> ') or line.startswith('... '): + insertions.append((len(curcode), + [(0, Generic.Prompt, line[:4])])) + curcode += line[4:] + elif line.rstrip() == '...': + insertions.append((len(curcode), + [(0, Generic.Prompt, '...')])) + curcode += line[3:] + else: + if curcode: + yield from do_insertions( + insertions, phplexer.get_tokens_unprocessed(curcode)) + curcode = '' + insertions = [] + yield match.start(), Generic.Output, line + if curcode: + yield from do_insertions(insertions, + phplexer.get_tokens_unprocessed(curcode)) + + +class PhpLexer(RegexLexer): + """ + For `PHP <http://www.php.net/>`_ source code. + For PHP embedded in HTML, use the `HtmlPhpLexer`.
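The new `PsyshConsoleLexer` above is a small `do_insertions` wrapper: prompt prefixes (`>>> `, `... `) are emitted as `Generic.Prompt`, result and output lines as `Generic.Output`, and the code in between is delegated to `PhpLexer` with `startinline` forced on. A usage sketch, reusing the session from its docstring:

    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    from pygments.lexers.php import PsyshConsoleLexer

    session = (
        '>>> $greeting = function($name): string {\n'
        '...     return "Hello, {$name}";\n'
        '... };\n'
        '=> Closure($name): string {#2371}\n'
    )
    # The '=>' result line carries no prompt prefix, so it is emitted
    # verbatim as Generic.Output.
    print(highlight(session, PsyshConsoleLexer(), TerminalFormatter()))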
+ + Additional options accepted: + + `startinline` + If given and ``True`` the lexer starts highlighting with + php code (i.e.: no starting ``<?php`` required). The default + is ``False``. + `funcnamehighlighting` + If given and ``True``, highlight builtin function names + (default: ``True``). + `disabledmodules` + If given and ``True``, do not highlight functions of selected + modules (default: ``['unknown']``). + To get a list of allowed modules have a look into the + `_php_builtins` module: + + .. sourcecode:: pycon + + >>> from pygments.lexers._php_builtins import MODULES + >>> MODULES.keys() + ['PHP Options/Info', 'Zip', 'dba', ...] + + In fact the names of those modules match the module names from + the php documentation. + """ + + name = 'PHP' + aliases = ['php', 'php3', 'php4', 'php5'] + filenames = ['*.php', '*.php[345]', '*.inc'] + mimetypes = ['text/x-php'] + + # Note that a backslash is included in the following two patterns + # PHP uses a backslash as a namespace separator + _ident_char = r'[\\\w]|[^\x00-\x7f]' + _ident_begin = r'(?:[\\_a-z]|[^\x00-\x7f])' + _ident_end = r'(?:' + _ident_char + ')*' + _ident_inner = _ident_begin + _ident_end + + flags = re.IGNORECASE | re.DOTALL | re.MULTILINE + tokens = { + 'root': [ + (r'<\?(php)?', Comment.Preproc, 'php'), + (r'[^<]+', Other), + (r'<', Other) + ], + 'php': [ + (r'\?>', Comment.Preproc, '#pop'), + (r'(<<<)([\'"]?)(' + _ident_inner + r')(\2\n.*?\n\s*)(\3)(;?)(\n)', + bygroups(String, String, String.Delimiter, String, String.Delimiter, + Punctuation, Text)), + (r'\s+', Text), + (r'#.*?\n', Comment.Single), + (r'//.*?\n', Comment.Single), + # put the empty comment here, it is otherwise seen as + # the start of a docstring + (r'/\*\*/', Comment.Multiline), + (r'/\*\*.*?\*/', String.Doc), + (r'/\*.*?\*/', Comment.Multiline), + (r'(->|::)(\s*)(' + _ident_inner + ')', + bygroups(Operator, Text, Name.Attribute)), + (r'[~!%^&*+=|:.<>/@-]+', Operator), + (r'\?', Operator), # don't add to the charclass above! + (r'[\[\]{}();,]+', Punctuation), + (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'), + (r'(function)(\s*)(?=\()', bygroups(Keyword, Text)), + (r'(function)(\s+)(&?)(\s*)', + bygroups(Keyword, Text, Operator, Text), 'functionname'), + (r'(const)(\s+)(' + _ident_inner + ')', + bygroups(Keyword, Text, Name.Constant)), + (r'(and|E_PARSE|old_function|E_ERROR|or|as|E_WARNING|parent|' + r'eval|PHP_OS|break|exit|case|extends|PHP_VERSION|cfunction|' + r'FALSE|print|for|require|continue|foreach|require_once|' + r'declare|return|default|static|do|switch|die|stdClass|' + r'echo|else|TRUE|elseif|var|empty|if|xor|enddeclare|include|' + r'virtual|endfor|include_once|while|endforeach|global|' + r'endif|list|endswitch|new|endwhile|not|' + r'array|E_ALL|NULL|final|php_user_filter|interface|' + r'implements|public|private|protected|abstract|clone|try|' + r'catch|throw|this|use|namespace|trait|yield|' + r'finally)\b', Keyword), + (r'(true|false|null)\b', Keyword.Constant), + include('magicconstants'), + (r'\$\{\$+' + _ident_inner + r'\}', Name.Variable), + (r'\$+' + _ident_inner, Name.Variable), + (_ident_inner, Name.Other), + (r'(\d+\.\d*|\d*\.\d+)(e[+-]?[0-9]+)?', Number.Float), + (r'\d+e[+-]?[0-9]+', Number.Float), + (r'0[0-7]+', Number.Oct), + (r'0x[a-f0-9]+', Number.Hex), + (r'\d+', Number.Integer), + (r'0b[01]+', Number.Bin), + (r"'([^'\\]*(?:\\.[^'\\]*)*)'", String.Single), + (r'`([^`\\]*(?:\\.[^`\\]*)*)`', String.Backtick), + (r'"', String.Double, 'string'), + ], + 'magicfuncs': [ + # source: http://php.net/manual/en/language.oop5.magic.php + (words(( + '__construct', '__destruct', '__call', '__callStatic', '__get', '__set', + '__isset', '__unset', '__sleep', '__wakeup', '__toString', '__invoke', + '__set_state', '__clone', '__debugInfo',), suffix=r'\b'), + Name.Function.Magic), + ], + 'magicconstants': [ + # source: http://php.net/manual/en/language.constants.predefined.php + (words(( + '__LINE__', '__FILE__',
'__DIR__', '__FUNCTION__', '__CLASS__', + '__TRAIT__', '__METHOD__', '__NAMESPACE__',), + suffix=r'\b'), + Name.Constant), + ], + 'classname': [ + (_ident_inner, Name.Class, '#pop') + ], + 'functionname': [ + include('magicfuncs'), + (_ident_inner, Name.Function, '#pop'), + default('#pop') + ], + 'string': [ + (r'"', String.Double, '#pop'), + (r'[^{$"\\]+', String.Double), + (r'\\([nrt"$\\]|[0-7]{1,3}|x[0-9a-f]{1,2})', String.Escape), + (r'\$' + _ident_inner + r'(\[\S+?\]|->' + _ident_inner + ')?', + String.Interpol), + (r'(\{\$\{)(.*?)(\}\})', + bygroups(String.Interpol, using(this, _startinline=True), + String.Interpol)), + (r'(\{)(\$.*?)(\})', + bygroups(String.Interpol, using(this, _startinline=True), + String.Interpol)), + (r'(\$\{)(\S+)(\})', + bygroups(String.Interpol, Name.Variable, String.Interpol)), + (r'[${\\]', String.Double) + ], + } + + def __init__(self, **options): + self.funcnamehighlighting = get_bool_opt( + options, 'funcnamehighlighting', True) + self.disabledmodules = get_list_opt( + options, 'disabledmodules', ['unknown']) + self.startinline = get_bool_opt(options, 'startinline', False) + + # private option argument for the lexer itself + if '_startinline' in options: + self.startinline = options.pop('_startinline') + + # collect activated functions in a set + self._functions = set() + if self.funcnamehighlighting: + from pygments.lexers._php_builtins import MODULES + for key, value in MODULES.items(): + if key not in self.disabledmodules: + self._functions.update(value) + RegexLexer.__init__(self, **options) + + def get_tokens_unprocessed(self, text): + stack = ['root'] + if self.startinline: + stack.append('php') + for index, token, value in \ + RegexLexer.get_tokens_unprocessed(self, text, stack): + if token is Name.Other: + if value in self._functions: + yield index, Name.Builtin, value + continue + yield index, token, value + + def analyse_text(text): + if shebang_matches(text, r'php'): + return True + rv = 0.0 + if re.search(r'<\?(?!xml)', text): + rv += 0.3 + return rv diff --git a/pygments/lexers/pointless.py b/pygments/lexers/pointless.py new file mode 100755 index 0000000..9f96cdb --- /dev/null +++ b/pygments/lexers/pointless.py @@ -0,0 +1,71 @@ +# -*- coding: utf-8 -*- +""" + pygments.lexers.pointless + ~~~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for Pointless. + + :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer, words +from pygments.token import Comment, Error, Keyword, Name, Number, Operator, \ + Punctuation, String, Text + +__all__ = ['PointlessLexer'] + + +class PointlessLexer(RegexLexer): + """ + For `Pointless `_ source code. + + .. 
versionadded:: 2.7 + """ + + name = 'Pointless' + aliases = ['pointless'] + filenames = ['*.ptls'] + + ops = words([ + "+", "-", "*", "/", "**", "%", "+=", "-=", "*=", + "/=", "**=", "%=", "|>", "=", "==", "!=", "<", ">", + "<=", ">=", "=>", "$", "++", + ]) + + keywords = words([ + "if", "then", "else", "where", "with", "cond", + "case", "and", "or", "not", "in", "as", "for", + "requires", "throw", "try", "catch", "when", + "yield", "upval", + ], suffix=r'\b') + + tokens = { + 'root': [ + (r'[ \n\r]+', Text), + (r'--.*$', Comment.Single), + (r'"""', String, 'multiString'), + (r'"', String, 'string'), + (r'[\[\](){}:;,.]', Punctuation), + (ops, Operator), + (keywords, Keyword), + (r'\d+|\d*\.\d+', Number), + (r'(true|false)\b', Name.Builtin), + (r'[A-Z][a-zA-Z0-9]*\b', String.Symbol), + (r'output\b', Name.Variable.Magic), + (r'(export|import)\b', Keyword.Namespace), + (r'[a-z][a-zA-Z0-9]*\b', Name.Variable) + ], + 'multiString': [ + (r'\\.', String.Escape), + (r'"""', String, '#pop'), + (r'"', String), + (r'[^\\"]+', String), + ], + 'string': [ + (r'\\.', String.Escape), + (r'"', String, '#pop'), + (r'\n', Error), + (r'[^\\"]+', String), + ], + } diff --git a/pygments/lexers/pony.py b/pygments/lexers/pony.py old mode 100644 new mode 100755 index 8f5d428..dede584 --- a/pygments/lexers/pony.py +++ b/pygments/lexers/pony.py @@ -1,94 +1,94 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.pony - ~~~~~~~~~~~~~~~~~~~~ - - Lexers for Pony and related languages. - - :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -from pygments.lexer import RegexLexer, bygroups, words -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ - Number, Punctuation - -__all__ = ['PonyLexer'] - - -class PonyLexer(RegexLexer): - """ - For Pony source code. - - .. 
versionadded:: 2.4 - """ - - name = 'Pony' - aliases = ['pony'] - filenames = ['*.pony'] - - _caps = r'(iso|trn|ref|val|box|tag)' - - tokens = { - 'root': [ - (r'\n', Text), - (r'[^\S\n]+', Text), - (r'//.*\n', Comment.Single), - (r'/\*', Comment.Multiline, 'nested_comment'), - (r'"""(?:.|\n)*?"""', String.Doc), - (r'"', String, 'string'), - (r'\'.*\'', String.Char), - (r'=>|[]{}:().~;,|&!^?[]', Punctuation), - (words(( - 'addressof', 'and', 'as', 'consume', 'digestof', 'is', 'isnt', - 'not', 'or'), - suffix=r'\b'), - Operator.Word), - (r'!=|==|<<|>>|[-+/*%=<>]', Operator), - (words(( - 'box', 'break', 'compile_error', 'compile_intrinsic', - 'continue', 'do', 'else', 'elseif', 'embed', 'end', 'error', - 'for', 'if', 'ifdef', 'in', 'iso', 'lambda', 'let', 'match', - 'object', 'recover', 'ref', 'repeat', 'return', 'tag', 'then', - 'this', 'trn', 'try', 'until', 'use', 'var', 'val', 'where', - 'while', 'with', '#any', '#read', '#send', '#share'), - suffix=r'\b'), - Keyword), - (r'(actor|class|struct|primitive|interface|trait|type)((?:\s)+)', - bygroups(Keyword, Text), 'typename'), - (r'(new|fun|be)((?:\s)+)', bygroups(Keyword, Text), 'methodname'), - (words(( - 'I8', 'U8', 'I16', 'U16', 'I32', 'U32', 'I64', 'U64', 'I128', - 'U128', 'ILong', 'ULong', 'ISize', 'USize', 'F32', 'F64', - 'Bool', 'Pointer', 'None', 'Any', 'Array', 'String', - 'Iterator'), - suffix=r'\b'), - Name.Builtin.Type), - (r'_?[A-Z]\w*', Name.Type), - (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float), - (r'0x[0-9a-fA-F]+', Number.Hex), - (r'\d+', Number.Integer), - (r'(true|false)\b', Name.Builtin), - (r'_\d*', Name), - (r'_?[a-z][\w\'_]*', Name) - ], - 'typename': [ - (_caps + r'?((?:\s)*)(_?[A-Z]\w*)', - bygroups(Keyword, Text, Name.Class), '#pop') - ], - 'methodname': [ - (_caps + r'?((?:\s)*)(_?[a-z]\w*)', - bygroups(Keyword, Text, Name.Function), '#pop') - ], - 'nested_comment': [ - (r'[^*/]+', Comment.Multiline), - (r'/\*', Comment.Multiline, '#push'), - (r'\*/', Comment.Multiline, '#pop'), - (r'[*/]', Comment.Multiline) - ], - 'string': [ - (r'"', String, '#pop'), - (r'\\"', String), - (r'[^\\"]+', String) - ] - } +# -*- coding: utf-8 -*- +""" + pygments.lexers.pony + ~~~~~~~~~~~~~~~~~~~~ + + Lexers for Pony and related languages. + + :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer, bygroups, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +__all__ = ['PonyLexer'] + + +class PonyLexer(RegexLexer): + """ + For Pony source code. + + .. 
versionadded:: 2.4 + """ + + name = 'Pony' + aliases = ['pony'] + filenames = ['*.pony'] + + _caps = r'(iso|trn|ref|val|box|tag)' + + tokens = { + 'root': [ + (r'\n', Text), + (r'[^\S\n]+', Text), + (r'//.*\n', Comment.Single), + (r'/\*', Comment.Multiline, 'nested_comment'), + (r'"""(?:.|\n)*?"""', String.Doc), + (r'"', String, 'string'), + (r'\'.*\'', String.Char), + (r'=>|[]{}:().~;,|&!^?[]', Punctuation), + (words(( + 'addressof', 'and', 'as', 'consume', 'digestof', 'is', 'isnt', + 'not', 'or'), + suffix=r'\b'), + Operator.Word), + (r'!=|==|<<|>>|[-+/*%=<>]', Operator), + (words(( + 'box', 'break', 'compile_error', 'compile_intrinsic', + 'continue', 'do', 'else', 'elseif', 'embed', 'end', 'error', + 'for', 'if', 'ifdef', 'in', 'iso', 'lambda', 'let', 'match', + 'object', 'recover', 'ref', 'repeat', 'return', 'tag', 'then', + 'this', 'trn', 'try', 'until', 'use', 'var', 'val', 'where', + 'while', 'with', '#any', '#read', '#send', '#share'), + suffix=r'\b'), + Keyword), + (r'(actor|class|struct|primitive|interface|trait|type)((?:\s)+)', + bygroups(Keyword, Text), 'typename'), + (r'(new|fun|be)((?:\s)+)', bygroups(Keyword, Text), 'methodname'), + (words(( + 'I8', 'U8', 'I16', 'U16', 'I32', 'U32', 'I64', 'U64', 'I128', + 'U128', 'ILong', 'ULong', 'ISize', 'USize', 'F32', 'F64', + 'Bool', 'Pointer', 'None', 'Any', 'Array', 'String', + 'Iterator'), + suffix=r'\b'), + Name.Builtin.Type), + (r'_?[A-Z]\w*', Name.Type), + (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float), + (r'0x[0-9a-fA-F]+', Number.Hex), + (r'\d+', Number.Integer), + (r'(true|false)\b', Name.Builtin), + (r'_\d*', Name), + (r'_?[a-z][\w\']*', Name) + ], + 'typename': [ + (_caps + r'?((?:\s)*)(_?[A-Z]\w*)', + bygroups(Keyword, Text, Name.Class), '#pop') + ], + 'methodname': [ + (_caps + r'?((?:\s)*)(_?[a-z]\w*)', + bygroups(Keyword, Text, Name.Function), '#pop') + ], + 'nested_comment': [ + (r'[^*/]+', Comment.Multiline), + (r'/\*', Comment.Multiline, '#push'), + (r'\*/', Comment.Multiline, '#pop'), + (r'[*/]', Comment.Multiline) + ], + 'string': [ + (r'"', String, '#pop'), + (r'\\"', String), + (r'[^\\"]+', String) + ] + } diff --git a/pygments/lexers/praat.py b/pygments/lexers/praat.py old mode 100644 new mode 100755 index 4a6a14f..4e1a6c4 --- a/pygments/lexers/praat.py +++ b/pygments/lexers/praat.py @@ -1,302 +1,302 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.praat - ~~~~~~~~~~~~~~~~~~~~~ - - Lexer for Praat - - :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -from pygments.lexer import RegexLexer, words, bygroups, include -from pygments.token import Name, Text, Comment, Keyword, String, Punctuation, Number, \ - Operator - -__all__ = ['PraatLexer'] - - -class PraatLexer(RegexLexer): - """ - For `Praat `_ scripts. - - .. 
versionadded:: 2.1 - """ - - name = 'Praat' - aliases = ['praat'] - filenames = ['*.praat', '*.proc', '*.psc'] - - keywords = ( - 'if', 'then', 'else', 'elsif', 'elif', 'endif', 'fi', 'for', 'from', 'to', - 'endfor', 'endproc', 'while', 'endwhile', 'repeat', 'until', 'select', 'plus', - 'minus', 'demo', 'assert', 'stopwatch', 'nocheck', 'nowarn', 'noprogress', - 'editor', 'endeditor', 'clearinfo', - ) - - functions_string = ( - 'backslashTrigraphsToUnicode', 'chooseDirectory', 'chooseReadFile', - 'chooseWriteFile', 'date', 'demoKey', 'do', 'environment', 'extractLine', - 'extractWord', 'fixed', 'info', 'left', 'mid', 'percent', 'readFile', 'replace', - 'replace_regex', 'right', 'selected', 'string', 'unicodeToBackslashTrigraphs', - ) - - functions_numeric = ( - 'abs', 'appendFile', 'appendFileLine', 'appendInfo', 'appendInfoLine', 'arccos', - 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', 'barkToHertz', - 'beginPause', 'beginSendPraat', 'besselI', 'besselK', 'beta', 'beta2', - 'binomialP', 'binomialQ', 'boolean', 'ceiling', 'chiSquareP', 'chiSquareQ', - 'choice', 'comment', 'cos', 'cosh', 'createDirectory', 'deleteFile', - 'demoClicked', 'demoClickedIn', 'demoCommandKeyPressed', - 'demoExtraControlKeyPressed', 'demoInput', 'demoKeyPressed', - 'demoOptionKeyPressed', 'demoShiftKeyPressed', 'demoShow', 'demoWaitForInput', - 'demoWindowTitle', 'demoX', 'demoY', 'differenceLimensToPhon', 'do', 'editor', - 'endPause', 'endSendPraat', 'endsWith', 'erb', 'erbToHertz', 'erf', 'erfc', - 'exitScript', 'exp', 'extractNumber', 'fileReadable', 'fisherP', 'fisherQ', - 'floor', 'gaussP', 'gaussQ', 'hertzToBark', 'hertzToErb', 'hertzToMel', - 'hertzToSemitones', 'imax', 'imin', 'incompleteBeta', 'incompleteGammaP', 'index', - 'index_regex', 'integer', 'invBinomialP', 'invBinomialQ', 'invChiSquareQ', 'invFisherQ', - 'invGaussQ', 'invSigmoid', 'invStudentQ', 'length', 'ln', 'lnBeta', 'lnGamma', - 'log10', 'log2', 'max', 'melToHertz', 'min', 'minusObject', 'natural', 'number', - 'numberOfColumns', 'numberOfRows', 'numberOfSelected', 'objectsAreIdentical', - 'option', 'optionMenu', 'pauseScript', 'phonToDifferenceLimens', 'plusObject', - 'positive', 'randomBinomial', 'randomGauss', 'randomInteger', 'randomPoisson', - 'randomUniform', 'real', 'readFile', 'removeObject', 'rindex', 'rindex_regex', - 'round', 'runScript', 'runSystem', 'runSystem_nocheck', 'selectObject', - 'selected', 'semitonesToHertz', 'sentence', 'sentencetext', 'sigmoid', 'sin', 'sinc', - 'sincpi', 'sinh', 'soundPressureToPhon', 'sqrt', 'startsWith', 'studentP', - 'studentQ', 'tan', 'tanh', 'text', 'variableExists', 'word', 'writeFile', 'writeFileLine', - 'writeInfo', 'writeInfoLine', - ) - - functions_array = ( - 'linear', 'randomGauss', 'randomInteger', 'randomUniform', 'zero', - ) - - objects = ( - 'Activation', 'AffineTransform', 'AmplitudeTier', 'Art', 'Artword', - 'Autosegment', 'BarkFilter', 'BarkSpectrogram', 'CCA', 'Categories', - 'Cepstrogram', 'Cepstrum', 'Cepstrumc', 'ChebyshevSeries', 'ClassificationTable', - 'Cochleagram', 'Collection', 'ComplexSpectrogram', 'Configuration', 'Confusion', - 'ContingencyTable', 'Corpus', 'Correlation', 'Covariance', - 'CrossCorrelationTable', 'CrossCorrelationTables', 'DTW', 'DataModeler', - 'Diagonalizer', 'Discriminant', 'Dissimilarity', 'Distance', 'Distributions', - 'DurationTier', 'EEG', 'ERP', 'ERPTier', 'EditCostsTable', 'EditDistanceTable', - 'Eigen', 'Excitation', 'Excitations', 'ExperimentMFC', 'FFNet', 'FeatureWeights', - 'FileInMemory', 'FilesInMemory', 'Formant', 
'FormantFilter', 'FormantGrid', - 'FormantModeler', 'FormantPoint', 'FormantTier', 'GaussianMixture', 'HMM', - 'HMM_Observation', 'HMM_ObservationSequence', 'HMM_State', 'HMM_StateSequence', - 'Harmonicity', 'ISpline', 'Index', 'Intensity', 'IntensityTier', 'IntervalTier', - 'KNN', 'KlattGrid', 'KlattTable', 'LFCC', 'LPC', 'Label', 'LegendreSeries', - 'LinearRegression', 'LogisticRegression', 'LongSound', 'Ltas', 'MFCC', 'MSpline', - 'ManPages', 'Manipulation', 'Matrix', 'MelFilter', 'MelSpectrogram', - 'MixingMatrix', 'Movie', 'Network', 'Object', 'OTGrammar', 'OTHistory', 'OTMulti', - 'PCA', 'PairDistribution', 'ParamCurve', 'Pattern', 'Permutation', 'Photo', - 'Pitch', 'PitchModeler', 'PitchTier', 'PointProcess', 'Polygon', 'Polynomial', - 'PowerCepstrogram', 'PowerCepstrum', 'Procrustes', 'RealPoint', 'RealTier', - 'ResultsMFC', 'Roots', 'SPINET', 'SSCP', 'SVD', 'Salience', 'ScalarProduct', - 'Similarity', 'SimpleString', 'SortedSetOfString', 'Sound', 'Speaker', - 'Spectrogram', 'Spectrum', 'SpectrumTier', 'SpeechSynthesizer', 'SpellingChecker', - 'Strings', 'StringsIndex', 'Table', 'TableOfReal', 'TextGrid', 'TextInterval', - 'TextPoint', 'TextTier', 'Tier', 'Transition', 'VocalTract', 'VocalTractTier', - 'Weight', 'WordList', - ) - - variables_numeric = ( - 'macintosh', 'windows', 'unix', 'praatVersion', 'pi', 'e', 'undefined', - ) - - variables_string = ( - 'praatVersion', 'tab', 'shellDirectory', 'homeDirectory', - 'preferencesDirectory', 'newline', 'temporaryDirectory', - 'defaultDirectory', - ) - - object_attributes = ( - 'ncol', 'nrow', 'xmin', 'ymin', 'xmax', 'ymax', 'nx', 'ny', 'dx', 'dy', - ) - - tokens = { - 'root': [ - (r'(\s+)(#.*?$)', bygroups(Text, Comment.Single)), - (r'^#.*?$', Comment.Single), - (r';[^\n]*', Comment.Single), - (r'\s+', Text), - - (r'\bprocedure\b', Keyword, 'procedure_definition'), - (r'\bcall\b', Keyword, 'procedure_call'), - (r'@', Name.Function, 'procedure_call'), - - include('function_call'), - - (words(keywords, suffix=r'\b'), Keyword), - - (r'(\bform\b)(\s+)([^\n]+)', - bygroups(Keyword, Text, String), 'old_form'), - - (r'(print(?:line|tab)?|echo|exit|asserterror|pause|send(?:praat|socket)|' - r'include|execute|system(?:_nocheck)?)(\s+)', - bygroups(Keyword, Text), 'string_unquoted'), - - (r'(goto|label)(\s+)(\w+)', bygroups(Keyword, Text, Name.Label)), - - include('variable_name'), - include('number'), - - (r'"', String, 'string'), - - (words((objects), suffix=r'(?=\s+\S+\n)'), Name.Class, 'string_unquoted'), - - (r'\b[A-Z]', Keyword, 'command'), - (r'(\.{3}|[)(,])', Punctuation), - ], - 'command': [ - (r'( ?[\w()-]+ ?)', Keyword), - - include('string_interpolated'), - - (r'\.{3}', Keyword, ('#pop', 'old_arguments')), - (r':', Keyword, ('#pop', 'comma_list')), - (r'\s', Text, '#pop'), - ], - 'procedure_call': [ - (r'\s+', Text), - (r'([\w.]+)(:|\s*\()', - bygroups(Name.Function, Text), '#pop'), - (r'([\w.]+)', Name.Function, ('#pop', 'old_arguments')), - ], - 'procedure_definition': [ - (r'\s', Text), - (r'([\w.]+)(\s*?[(:])', - bygroups(Name.Function, Text), '#pop'), - (r'([\w.]+)([^\n]*)', - bygroups(Name.Function, Text), '#pop'), - ], - 'function_call': [ - (words(functions_string, suffix=r'\$(?=\s*[:(])'), Name.Function, 'function'), - (words(functions_array, suffix=r'#(?=\s*[:(])'), Name.Function, 'function'), - (words(functions_numeric, suffix=r'(?=\s*[:(])'), Name.Function, 'function'), - ], - 'function': [ - (r'\s+', Text), - (r':', Punctuation, ('#pop', 'comma_list')), - (r'\s*\(', Punctuation, ('#pop', 'comma_list')), - ], - 
'comma_list': [ - (r'(\s*\n\s*)(\.{3})', bygroups(Text, Punctuation)), - - (r'(\s*[])\n])', Text, '#pop'), - - (r'\s+', Text), - (r'"', String, 'string'), - (r'\b(if|then|else|fi|endif)\b', Keyword), - - include('function_call'), - include('variable_name'), - include('operator'), - include('number'), - - (r'[()]', Text), - (r',', Punctuation), - ], - 'old_arguments': [ - (r'\n', Text, '#pop'), - - include('variable_name'), - include('operator'), - include('number'), - - (r'"', String, 'string'), - (r'[^\n]', Text), - ], - 'number': [ - (r'\n', Text, '#pop'), - (r'\b\d+(\.\d*)?([eE][-+]?\d+)?%?', Number), - ], - 'object_reference': [ - include('string_interpolated'), - (r'([a-z][a-zA-Z0-9_]*|\d+)', Name.Builtin), - - (words(object_attributes, prefix=r'\.'), Name.Builtin, '#pop'), - - (r'\$', Name.Builtin), - (r'\[', Text, '#pop'), - ], - 'variable_name': [ - include('operator'), - include('number'), - - (words(variables_string, suffix=r'\$'), Name.Variable.Global), - (words(variables_numeric, - suffix=r'(?=[^a-zA-Z0-9\._"\'\$#\[:\(]|\s|^|$)'), - Name.Variable.Global), - - (words(objects, prefix=r'\b', suffix=r"(_)"), - bygroups(Name.Builtin, Name.Builtin), - 'object_reference'), - - (r'\.?_?[a-z][\w.]*(\$|#)?', Text), - (r'[\[\]]', Punctuation, 'comma_list'), - - include('string_interpolated'), - ], - 'operator': [ - (r'([+\/*<>=!-]=?|[&*|][&*|]?|\^|<>)', Operator), - (r'(?`_ scripts. + + .. versionadded:: 2.1 + """ + + name = 'Praat' + aliases = ['praat'] + filenames = ['*.praat', '*.proc', '*.psc'] + + keywords = ( + 'if', 'then', 'else', 'elsif', 'elif', 'endif', 'fi', 'for', 'from', 'to', + 'endfor', 'endproc', 'while', 'endwhile', 'repeat', 'until', 'select', 'plus', + 'minus', 'demo', 'assert', 'stopwatch', 'nocheck', 'nowarn', 'noprogress', + 'editor', 'endeditor', 'clearinfo', + ) + + functions_string = ( + 'backslashTrigraphsToUnicode', 'chooseDirectory', 'chooseReadFile', + 'chooseWriteFile', 'date', 'demoKey', 'do', 'environment', 'extractLine', + 'extractWord', 'fixed', 'info', 'left', 'mid', 'percent', 'readFile', 'replace', + 'replace_regex', 'right', 'selected', 'string', 'unicodeToBackslashTrigraphs', + ) + + functions_numeric = ( + 'abs', 'appendFile', 'appendFileLine', 'appendInfo', 'appendInfoLine', 'arccos', + 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', 'barkToHertz', + 'beginPause', 'beginSendPraat', 'besselI', 'besselK', 'beta', 'beta2', + 'binomialP', 'binomialQ', 'boolean', 'ceiling', 'chiSquareP', 'chiSquareQ', + 'choice', 'comment', 'cos', 'cosh', 'createDirectory', 'deleteFile', + 'demoClicked', 'demoClickedIn', 'demoCommandKeyPressed', + 'demoExtraControlKeyPressed', 'demoInput', 'demoKeyPressed', + 'demoOptionKeyPressed', 'demoShiftKeyPressed', 'demoShow', 'demoWaitForInput', + 'demoWindowTitle', 'demoX', 'demoY', 'differenceLimensToPhon', 'do', 'editor', + 'endPause', 'endSendPraat', 'endsWith', 'erb', 'erbToHertz', 'erf', 'erfc', + 'exitScript', 'exp', 'extractNumber', 'fileReadable', 'fisherP', 'fisherQ', + 'floor', 'gaussP', 'gaussQ', 'hertzToBark', 'hertzToErb', 'hertzToMel', + 'hertzToSemitones', 'imax', 'imin', 'incompleteBeta', 'incompleteGammaP', 'index', + 'index_regex', 'integer', 'invBinomialP', 'invBinomialQ', 'invChiSquareQ', 'invFisherQ', + 'invGaussQ', 'invSigmoid', 'invStudentQ', 'length', 'ln', 'lnBeta', 'lnGamma', + 'log10', 'log2', 'max', 'melToHertz', 'min', 'minusObject', 'natural', 'number', + 'numberOfColumns', 'numberOfRows', 'numberOfSelected', 'objectsAreIdentical', + 'option', 'optionMenu', 'pauseScript', 
'phonToDifferenceLimens', 'plusObject', + 'positive', 'randomBinomial', 'randomGauss', 'randomInteger', 'randomPoisson', + 'randomUniform', 'real', 'readFile', 'removeObject', 'rindex', 'rindex_regex', + 'round', 'runScript', 'runSystem', 'runSystem_nocheck', 'selectObject', + 'selected', 'semitonesToHertz', 'sentence', 'sentencetext', 'sigmoid', 'sin', 'sinc', + 'sincpi', 'sinh', 'soundPressureToPhon', 'sqrt', 'startsWith', 'studentP', + 'studentQ', 'tan', 'tanh', 'text', 'variableExists', 'word', 'writeFile', 'writeFileLine', + 'writeInfo', 'writeInfoLine', + ) + + functions_array = ( + 'linear', 'randomGauss', 'randomInteger', 'randomUniform', 'zero', + ) + + objects = ( + 'Activation', 'AffineTransform', 'AmplitudeTier', 'Art', 'Artword', + 'Autosegment', 'BarkFilter', 'BarkSpectrogram', 'CCA', 'Categories', + 'Cepstrogram', 'Cepstrum', 'Cepstrumc', 'ChebyshevSeries', 'ClassificationTable', + 'Cochleagram', 'Collection', 'ComplexSpectrogram', 'Configuration', 'Confusion', + 'ContingencyTable', 'Corpus', 'Correlation', 'Covariance', + 'CrossCorrelationTable', 'CrossCorrelationTables', 'DTW', 'DataModeler', + 'Diagonalizer', 'Discriminant', 'Dissimilarity', 'Distance', 'Distributions', + 'DurationTier', 'EEG', 'ERP', 'ERPTier', 'EditCostsTable', 'EditDistanceTable', + 'Eigen', 'Excitation', 'Excitations', 'ExperimentMFC', 'FFNet', 'FeatureWeights', + 'FileInMemory', 'FilesInMemory', 'Formant', 'FormantFilter', 'FormantGrid', + 'FormantModeler', 'FormantPoint', 'FormantTier', 'GaussianMixture', 'HMM', + 'HMM_Observation', 'HMM_ObservationSequence', 'HMM_State', 'HMM_StateSequence', + 'Harmonicity', 'ISpline', 'Index', 'Intensity', 'IntensityTier', 'IntervalTier', + 'KNN', 'KlattGrid', 'KlattTable', 'LFCC', 'LPC', 'Label', 'LegendreSeries', + 'LinearRegression', 'LogisticRegression', 'LongSound', 'Ltas', 'MFCC', 'MSpline', + 'ManPages', 'Manipulation', 'Matrix', 'MelFilter', 'MelSpectrogram', + 'MixingMatrix', 'Movie', 'Network', 'Object', 'OTGrammar', 'OTHistory', 'OTMulti', + 'PCA', 'PairDistribution', 'ParamCurve', 'Pattern', 'Permutation', 'Photo', + 'Pitch', 'PitchModeler', 'PitchTier', 'PointProcess', 'Polygon', 'Polynomial', + 'PowerCepstrogram', 'PowerCepstrum', 'Procrustes', 'RealPoint', 'RealTier', + 'ResultsMFC', 'Roots', 'SPINET', 'SSCP', 'SVD', 'Salience', 'ScalarProduct', + 'Similarity', 'SimpleString', 'SortedSetOfString', 'Sound', 'Speaker', + 'Spectrogram', 'Spectrum', 'SpectrumTier', 'SpeechSynthesizer', 'SpellingChecker', + 'Strings', 'StringsIndex', 'Table', 'TableOfReal', 'TextGrid', 'TextInterval', + 'TextPoint', 'TextTier', 'Tier', 'Transition', 'VocalTract', 'VocalTractTier', + 'Weight', 'WordList', + ) + + variables_numeric = ( + 'macintosh', 'windows', 'unix', 'praatVersion', 'pi', 'e', 'undefined', + ) + + variables_string = ( + 'praatVersion', 'tab', 'shellDirectory', 'homeDirectory', + 'preferencesDirectory', 'newline', 'temporaryDirectory', + 'defaultDirectory', + ) + + object_attributes = ( + 'ncol', 'nrow', 'xmin', 'ymin', 'xmax', 'ymax', 'nx', 'ny', 'dx', 'dy', + ) + + tokens = { + 'root': [ + (r'(\s+)(#.*?$)', bygroups(Text, Comment.Single)), + (r'^#.*?$', Comment.Single), + (r';[^\n]*', Comment.Single), + (r'\s+', Text), + + (r'\bprocedure\b', Keyword, 'procedure_definition'), + (r'\bcall\b', Keyword, 'procedure_call'), + (r'@', Name.Function, 'procedure_call'), + + include('function_call'), + + (words(keywords, suffix=r'\b'), Keyword), + + (r'(\bform\b)(\s+)([^\n]+)', + bygroups(Keyword, Text, String), 'old_form'), + + 
(r'(print(?:line|tab)?|echo|exit|asserterror|pause|send(?:praat|socket)|' + r'include|execute|system(?:_nocheck)?)(\s+)', + bygroups(Keyword, Text), 'string_unquoted'), + + (r'(goto|label)(\s+)(\w+)', bygroups(Keyword, Text, Name.Label)), + + include('variable_name'), + include('number'), + + (r'"', String, 'string'), + + (words((objects), suffix=r'(?=\s+\S+\n)'), Name.Class, 'string_unquoted'), + + (r'\b[A-Z]', Keyword, 'command'), + (r'(\.{3}|[)(,])', Punctuation), + ], + 'command': [ + (r'( ?[\w()-]+ ?)', Keyword), + + include('string_interpolated'), + + (r'\.{3}', Keyword, ('#pop', 'old_arguments')), + (r':', Keyword, ('#pop', 'comma_list')), + (r'\s', Text, '#pop'), + ], + 'procedure_call': [ + (r'\s+', Text), + (r'([\w.]+)(:|\s*\()', + bygroups(Name.Function, Text), '#pop'), + (r'([\w.]+)', Name.Function, ('#pop', 'old_arguments')), + ], + 'procedure_definition': [ + (r'\s', Text), + (r'([\w.]+)(\s*?[(:])', + bygroups(Name.Function, Text), '#pop'), + (r'([\w.]+)([^\n]*)', + bygroups(Name.Function, Text), '#pop'), + ], + 'function_call': [ + (words(functions_string, suffix=r'\$(?=\s*[:(])'), Name.Function, 'function'), + (words(functions_array, suffix=r'#(?=\s*[:(])'), Name.Function, 'function'), + (words(functions_numeric, suffix=r'(?=\s*[:(])'), Name.Function, 'function'), + ], + 'function': [ + (r'\s+', Text), + (r':', Punctuation, ('#pop', 'comma_list')), + (r'\s*\(', Punctuation, ('#pop', 'comma_list')), + ], + 'comma_list': [ + (r'(\s*\n\s*)(\.{3})', bygroups(Text, Punctuation)), + + (r'(\s*[])\n])', Text, '#pop'), + + (r'\s+', Text), + (r'"', String, 'string'), + (r'\b(if|then|else|fi|endif)\b', Keyword), + + include('function_call'), + include('variable_name'), + include('operator'), + include('number'), + + (r'[()]', Text), + (r',', Punctuation), + ], + 'old_arguments': [ + (r'\n', Text, '#pop'), + + include('variable_name'), + include('operator'), + include('number'), + + (r'"', String, 'string'), + (r'[^\n]', Text), + ], + 'number': [ + (r'\n', Text, '#pop'), + (r'\b\d+(\.\d*)?([eE][-+]?\d+)?%?', Number), + ], + 'object_reference': [ + include('string_interpolated'), + (r'([a-z][a-zA-Z0-9_]*|\d+)', Name.Builtin), + + (words(object_attributes, prefix=r'\.'), Name.Builtin, '#pop'), + + (r'\$', Name.Builtin), + (r'\[', Text, '#pop'), + ], + 'variable_name': [ + include('operator'), + include('number'), + + (words(variables_string, suffix=r'\$'), Name.Variable.Global), + (words(variables_numeric, + suffix=r'(?=[^a-zA-Z0-9_."\'$#\[:(]|\s|^|$)'), + Name.Variable.Global), + + (words(objects, prefix=r'\b', suffix=r"(_)"), + bygroups(Name.Builtin, Name.Builtin), + 'object_reference'), + + (r'\.?_?[a-z][\w.]*(\$|#)?', Text), + (r'[\[\]]', Punctuation, 'comma_list'), + + include('string_interpolated'), + ], + 'operator': [ + (r'([+\/*<>=!-]=?|[&*|][&*|]?|\^|<>)', Operator), + (r'(?', Punctuation), - (r'"(?:\\x[0-9a-fA-F]+\\|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|' - r'\\[0-7]+\\|\\["\nabcefnrstv]|[^\\"])*"', String.Double), - (r"'(?:''|[^'])*'", String.Atom), # quoted atom - # Needs to not be followed by an atom. 
- # (r'=(?=\s|[a-zA-Z\[])', Operator), - (r'is\b', Operator), - (r'(<|>|=<|>=|==|=:=|=|/|//|\*|\+|-)(?=\s|[a-zA-Z0-9\[])', - Operator), - (r'(mod|div|not)\b', Operator), - (r'_', Keyword), # The don't-care variable - (r'([a-z]+)(:)', bygroups(Name.Namespace, Punctuation)), - (u'([a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]' - u'[\\w$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*)' - u'(\\s*)(:-|-->)', - bygroups(Name.Function, Text, Operator)), # function defn - (u'([a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]' - u'[\\w$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*)' - u'(\\s*)(\\()', - bygroups(Name.Function, Text, Punctuation)), - (u'[a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]' - u'[\\w$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*', - String.Atom), # atom, characters - # This one includes ! - (u'[#&*+\\-./:<=>?@\\\\^~\u00a1-\u00bf\u2010-\u303f]+', - String.Atom), # atom, graphics - (r'[A-Z_]\w*', Name.Variable), - (u'\\s+|[\u2000-\u200f\ufff0-\ufffe\uffef]', Text), - ], - 'nested-comment': [ - (r'\*/', Comment.Multiline, '#pop'), - (r'/\*', Comment.Multiline, '#push'), - (r'[^*/]+', Comment.Multiline), - (r'[*/]', Comment.Multiline), - ], - } - - def analyse_text(text): - return ':-' in text - - -class LogtalkLexer(RegexLexer): - """ - For `Logtalk `_ source code. - - .. versionadded:: 0.10 - """ - - name = 'Logtalk' - aliases = ['logtalk'] - filenames = ['*.lgt', '*.logtalk'] - mimetypes = ['text/x-logtalk'] - - tokens = { - 'root': [ - # Directives - (r'^\s*:-\s', Punctuation, 'directive'), - # Comments - (r'%.*?\n', Comment), - (r'/\*(.|\n)*?\*/', Comment), - # Whitespace - (r'\n', Text), - (r'\s+', Text), - # Numbers - (r"0'[\\]?.", Number), - (r'0b[01]+', Number.Bin), - (r'0o[0-7]+', Number.Oct), - (r'0x[0-9a-fA-F]+', Number.Hex), - (r'\d+\.?\d*((e|E)(\+|-)?\d+)?', Number), - # Variables - (r'([A-Z_][a-zA-Z0-9_]*)', Name.Variable), - # Event handlers - (r'(after|before)(?=[(])', Keyword), - # Message forwarding handler - (r'forward(?=[(])', Keyword), - # Execution-context methods - (r'(context|parameter|this|se(lf|nder))(?=[(])', Keyword), - # Reflection - (r'(current_predicate|predicate_property)(?=[(])', Keyword), - # DCGs and term expansion - (r'(expand_(goal|term)|(goal|term)_expansion|phrase)(?=[(])', Keyword), - # Entity - (r'(abolish|c(reate|urrent))_(object|protocol|category)(?=[(])', Keyword), - (r'(object|protocol|category)_property(?=[(])', Keyword), - # Entity relations - (r'co(mplements_object|nforms_to_protocol)(?=[(])', Keyword), - (r'extends_(object|protocol|category)(?=[(])', Keyword), - (r'imp(lements_protocol|orts_category)(?=[(])', Keyword), - (r'(instantiat|specializ)es_class(?=[(])', Keyword), - # Events - (r'(current_event|(abolish|define)_events)(?=[(])', Keyword), - # Flags - (r'(create|current|set)_logtalk_flag(?=[(])', Keyword), - # Compiling, loading, and library paths - (r'logtalk_(compile|l(ibrary_path|oad|oad_context)|make(_target_action)?)(?=[(])', Keyword), - (r'\blogtalk_make\b', Keyword), - # Database - (r'(clause|retract(all)?)(?=[(])', Keyword), - (r'a(bolish|ssert(a|z))(?=[(])', Keyword), - # Control constructs - (r'(ca(ll|tch)|throw)(?=[(])', Keyword), - (r'(fa(il|lse)|true|(instantiation|system)_error)\b', Keyword), - (r'(type|domain|existence|permission|representation|evaluation|resource|syntax)_error(?=[(])', Keyword), - # All solutions - (r'((bag|set)of|f(ind|or)all)(?=[(])', Keyword), - # Multi-threading predicates - (r'threaded(_(ca(ll|ncel)|once|ignore|exit|peek|wait|notify))?(?=[(])', Keyword), - # Engine predicates - 
(r'threaded_engine(_(create|destroy|self|next|next_reified|yield|post|fetch))?(?=[(])', Keyword), - # Term unification - (r'(subsumes_term|unify_with_occurs_check)(?=[(])', Keyword), - # Term creation and decomposition - (r'(functor|arg|copy_term|numbervars|term_variables)(?=[(])', Keyword), - # Evaluable functors - (r'(div|rem|m(ax|in|od)|abs|sign)(?=[(])', Keyword), - (r'float(_(integer|fractional)_part)?(?=[(])', Keyword), - (r'(floor|t(an|runcate)|round|ceiling)(?=[(])', Keyword), - # Other arithmetic functors - (r'(cos|a(cos|sin|tan|tan2)|exp|log|s(in|qrt)|xor)(?=[(])', Keyword), - # Term testing - (r'(var|atom(ic)?|integer|float|c(allable|ompound)|n(onvar|umber)|ground|acyclic_term)(?=[(])', Keyword), - # Term comparison - (r'compare(?=[(])', Keyword), - # Stream selection and control - (r'(curren|se)t_(in|out)put(?=[(])', Keyword), - (r'(open|close)(?=[(])', Keyword), - (r'flush_output(?=[(])', Keyword), - (r'(at_end_of_stream|flush_output)\b', Keyword), - (r'(stream_property|at_end_of_stream|set_stream_position)(?=[(])', Keyword), - # Character and byte input/output - (r'(nl|(get|peek|put)_(byte|c(har|ode)))(?=[(])', Keyword), - (r'\bnl\b', Keyword), - # Term input/output - (r'read(_term)?(?=[(])', Keyword), - (r'write(q|_(canonical|term))?(?=[(])', Keyword), - (r'(current_)?op(?=[(])', Keyword), - (r'(current_)?char_conversion(?=[(])', Keyword), - # Atomic term processing - (r'atom_(length|c(hars|o(ncat|des)))(?=[(])', Keyword), - (r'(char_code|sub_atom)(?=[(])', Keyword), - (r'number_c(har|ode)s(?=[(])', Keyword), - # Implementation defined hooks functions - (r'(se|curren)t_prolog_flag(?=[(])', Keyword), - (r'\bhalt\b', Keyword), - (r'halt(?=[(])', Keyword), - # Message sending operators - (r'(::|:|\^\^)', Operator), - # External call - (r'[{}]', Keyword), - # Logic and control - (r'(ignore|once)(?=[(])', Keyword), - (r'\brepeat\b', Keyword), - # Sorting - (r'(key)?sort(?=[(])', Keyword), - # Bitwise functors - (r'(>>|<<|/\\|\\\\|\\)', Operator), - # Predicate aliases - (r'\bas\b', Operator), - # Arithemtic evaluation - (r'\bis\b', Keyword), - # Arithemtic comparison - (r'(=:=|=\\=|<|=<|>=|>)', Operator), - # Term creation and decomposition - (r'=\.\.', Operator), - # Term unification - (r'(=|\\=)', Operator), - # Term comparison - (r'(==|\\==|@=<|@<|@>=|@>)', Operator), - # Evaluable functors - (r'(//|[-+*/])', Operator), - (r'\b(e|pi|div|mod|rem)\b', Operator), - # Other arithemtic functors - (r'\b\*\*\b', Operator), - # DCG rules - (r'-->', Operator), - # Control constructs - (r'([!;]|->)', Operator), - # Logic and control - (r'\\+', Operator), - # Mode operators - (r'[?@]', Operator), - # Existential quantifier - (r'\^', Operator), - # Strings - (r'"(\\\\|\\"|[^"])*"', String), - # Punctuation - (r'[()\[\],.|]', Text), - # Atoms - (r"[a-z][a-zA-Z0-9_]*", Text), - (r"'", String, 'quoted_atom'), - ], - - 'quoted_atom': [ - (r"''", String), - (r"'", String, '#pop'), - (r'\\([\\abfnrtv"\']|(x[a-fA-F0-9]+|[0-7]+)\\)', String.Escape), - (r"[^\\'\n]+", String), - (r'\\', String), - ], - - 'directive': [ - # Conditional compilation directives - (r'(el)?if(?=[(])', Keyword, 'root'), - (r'(e(lse|ndif))(?=[.])', Keyword, 'root'), - # Entity directives - (r'(category|object|protocol)(?=[(])', Keyword, 'entityrelations'), - (r'(end_(category|object|protocol))(?=[.])', Keyword, 'root'), - # Predicate scope directives - (r'(public|protected|private)(?=[(])', Keyword, 'root'), - # Other directives - (r'e(n(coding|sure_loaded)|xport)(?=[(])', Keyword, 'root'), - 
(r'in(clude|itialization|fo)(?=[(])', Keyword, 'root'), - (r'(built_in|dynamic|synchronized|threaded)(?=[.])', Keyword, 'root'), - (r'(alias|d(ynamic|iscontiguous)|m(eta_(non_terminal|predicate)|ode|ultifile)|s(et_(logtalk|prolog)_flag|ynchronized))(?=[(])', Keyword, 'root'), - (r'op(?=[(])', Keyword, 'root'), - (r'(c(alls|oinductive)|module|reexport|use(s|_module))(?=[(])', Keyword, 'root'), - (r'[a-z][a-zA-Z0-9_]*(?=[(])', Text, 'root'), - (r'[a-z][a-zA-Z0-9_]*(?=[.])', Text, 'root'), - ], - - 'entityrelations': [ - (r'(complements|extends|i(nstantiates|mp(lements|orts))|specializes)(?=[(])', Keyword), - # Numbers - (r"0'[\\]?.", Number), - (r'0b[01]+', Number.Bin), - (r'0o[0-7]+', Number.Oct), - (r'0x[0-9a-fA-F]+', Number.Hex), - (r'\d+\.?\d*((e|E)(\+|-)?\d+)?', Number), - # Variables - (r'([A-Z_][a-zA-Z0-9_]*)', Name.Variable), - # Atoms - (r"[a-z][a-zA-Z0-9_]*", Text), - (r"'", String, 'quoted_atom'), - # Strings - (r'"(\\\\|\\"|[^"])*"', String), - # End of entity-opening directive - (r'([)]\.)', Text, 'root'), - # Scope operator - (r'(::)', Operator), - # Punctuation - (r'[()\[\],.|]', Text), - # Comments - (r'%.*?\n', Comment), - (r'/\*(.|\n)*?\*/', Comment), - # Whitespace - (r'\n', Text), - (r'\s+', Text), - ] - } - - def analyse_text(text): - if ':- object(' in text: - return 1.0 - elif ':- protocol(' in text: - return 1.0 - elif ':- category(' in text: - return 1.0 - elif re.search(r'^:-\s[a-z]', text, re.M): - return 0.9 - else: - return 0.0 +# -*- coding: utf-8 -*- +""" + pygments.lexers.prolog + ~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for Prolog and Prolog-like languages. + + :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, bygroups +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +__all__ = ['PrologLexer', 'LogtalkLexer'] + + +class PrologLexer(RegexLexer): + """ + Lexer for Prolog files. + """ + name = 'Prolog' + aliases = ['prolog'] + filenames = ['*.ecl', '*.prolog', '*.pro', '*.pl'] + mimetypes = ['text/x-prolog'] + + flags = re.UNICODE | re.MULTILINE + + tokens = { + 'root': [ + (r'/\*', Comment.Multiline, 'nested-comment'), + (r'%.*', Comment.Single), + # character literal + (r'0\'.', String.Char), + (r'0b[01]+', Number.Bin), + (r'0o[0-7]+', Number.Oct), + (r'0x[0-9a-fA-F]+', Number.Hex), + # literal with prepended base + (r'\d\d?\'[a-zA-Z0-9]+', Number.Integer), + (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float), + (r'\d+', Number.Integer), + (r'[\[\](){}|.,;!]', Punctuation), + (r':-|-->', Punctuation), + (r'"(?:\\x[0-9a-fA-F]+\\|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|' + r'\\[0-7]+\\|\\["\nabcefnrstv]|[^\\"])*"', String.Double), + (r"'(?:''|[^'])*'", String.Atom), # quoted atom + # Needs to not be followed by an atom. 
+ # (r'=(?=\s|[a-zA-Z\[])', Operator), + (r'is\b', Operator), + (r'(<|>|=<|>=|==|=:=|=|/|//|\*|\+|-)(?=\s|[a-zA-Z0-9\[])', + Operator), + (r'(mod|div|not)\b', Operator), + (r'_', Keyword), # The don't-care variable + (r'([a-z]+)(:)', bygroups(Name.Namespace, Punctuation)), + (r'([a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]' + r'[\w$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*)' + r'(\s*)(:-|-->)', + bygroups(Name.Function, Text, Operator)), # function defn + (r'([a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]' + r'[\w$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*)' + r'(\s*)(\()', + bygroups(Name.Function, Text, Punctuation)), + (r'[a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]' + r'[\w$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*', + String.Atom), # atom, characters + # This one includes ! + (r'[#&*+\-./:<=>?@\\^~\u00a1-\u00bf\u2010-\u303f]+', + String.Atom), # atom, graphics + (r'[A-Z_]\w*', Name.Variable), + (r'\s+|[\u2000-\u200f\ufff0-\ufffe\uffef]', Text), + ], + 'nested-comment': [ + (r'\*/', Comment.Multiline, '#pop'), + (r'/\*', Comment.Multiline, '#push'), + (r'[^*/]+', Comment.Multiline), + (r'[*/]', Comment.Multiline), + ], + } + + def analyse_text(text): + return ':-' in text + + +class LogtalkLexer(RegexLexer): + """ + For `Logtalk `_ source code. + + .. versionadded:: 0.10 + """ + + name = 'Logtalk' + aliases = ['logtalk'] + filenames = ['*.lgt', '*.logtalk'] + mimetypes = ['text/x-logtalk'] + + tokens = { + 'root': [ + # Directives + (r'^\s*:-\s', Punctuation, 'directive'), + # Comments + (r'%.*?\n', Comment), + (r'/\*(.|\n)*?\*/', Comment), + # Whitespace + (r'\n', Text), + (r'\s+', Text), + # Numbers + (r"0'[\\]?.", Number), + (r'0b[01]+', Number.Bin), + (r'0o[0-7]+', Number.Oct), + (r'0x[0-9a-fA-F]+', Number.Hex), + (r'\d+\.?\d*((e|E)(\+|-)?\d+)?', Number), + # Variables + (r'([A-Z_][a-zA-Z0-9_]*)', Name.Variable), + # Event handlers + (r'(after|before)(?=[(])', Keyword), + # Message forwarding handler + (r'forward(?=[(])', Keyword), + # Execution-context methods + (r'(context|parameter|this|se(lf|nder))(?=[(])', Keyword), + # Reflection + (r'(current_predicate|predicate_property)(?=[(])', Keyword), + # DCGs and term expansion + (r'(expand_(goal|term)|(goal|term)_expansion|phrase)(?=[(])', Keyword), + # Entity + (r'(abolish|c(reate|urrent))_(object|protocol|category)(?=[(])', Keyword), + (r'(object|protocol|category)_property(?=[(])', Keyword), + # Entity relations + (r'co(mplements_object|nforms_to_protocol)(?=[(])', Keyword), + (r'extends_(object|protocol|category)(?=[(])', Keyword), + (r'imp(lements_protocol|orts_category)(?=[(])', Keyword), + (r'(instantiat|specializ)es_class(?=[(])', Keyword), + # Events + (r'(current_event|(abolish|define)_events)(?=[(])', Keyword), + # Flags + (r'(create|current|set)_logtalk_flag(?=[(])', Keyword), + # Compiling, loading, and library paths + (r'logtalk_(compile|l(ibrary_path|oad|oad_context)|make(_target_action)?)(?=[(])', Keyword), + (r'\blogtalk_make\b', Keyword), + # Database + (r'(clause|retract(all)?)(?=[(])', Keyword), + (r'a(bolish|ssert(a|z))(?=[(])', Keyword), + # Control constructs + (r'(ca(ll|tch)|throw)(?=[(])', Keyword), + (r'(fa(il|lse)|true|(instantiation|system)_error)\b', Keyword), + (r'(type|domain|existence|permission|representation|evaluation|resource|syntax)_error(?=[(])', Keyword), + # All solutions + (r'((bag|set)of|f(ind|or)all)(?=[(])', Keyword), + # Multi-threading predicates + (r'threaded(_(ca(ll|ncel)|once|ignore|exit|peek|wait|notify))?(?=[(])', Keyword), + # Engine predicates + 
(r'threaded_engine(_(create|destroy|self|next|next_reified|yield|post|fetch))?(?=[(])', Keyword), + # Term unification + (r'(subsumes_term|unify_with_occurs_check)(?=[(])', Keyword), + # Term creation and decomposition + (r'(functor|arg|copy_term|numbervars|term_variables)(?=[(])', Keyword), + # Evaluable functors + (r'(div|rem|m(ax|in|od)|abs|sign)(?=[(])', Keyword), + (r'float(_(integer|fractional)_part)?(?=[(])', Keyword), + (r'(floor|t(an|runcate)|round|ceiling)(?=[(])', Keyword), + # Other arithmetic functors + (r'(cos|a(cos|sin|tan|tan2)|exp|log|s(in|qrt)|xor)(?=[(])', Keyword), + # Term testing + (r'(var|atom(ic)?|integer|float|c(allable|ompound)|n(onvar|umber)|ground|acyclic_term)(?=[(])', Keyword), + # Term comparison + (r'compare(?=[(])', Keyword), + # Stream selection and control + (r'(curren|se)t_(in|out)put(?=[(])', Keyword), + (r'(open|close)(?=[(])', Keyword), + (r'flush_output(?=[(])', Keyword), + (r'(at_end_of_stream|flush_output)\b', Keyword), + (r'(stream_property|at_end_of_stream|set_stream_position)(?=[(])', Keyword), + # Character and byte input/output + (r'(nl|(get|peek|put)_(byte|c(har|ode)))(?=[(])', Keyword), + (r'\bnl\b', Keyword), + # Term input/output + (r'read(_term)?(?=[(])', Keyword), + (r'write(q|_(canonical|term))?(?=[(])', Keyword), + (r'(current_)?op(?=[(])', Keyword), + (r'(current_)?char_conversion(?=[(])', Keyword), + # Atomic term processing + (r'atom_(length|c(hars|o(ncat|des)))(?=[(])', Keyword), + (r'(char_code|sub_atom)(?=[(])', Keyword), + (r'number_c(har|ode)s(?=[(])', Keyword), + # Implementation defined hooks functions + (r'(se|curren)t_prolog_flag(?=[(])', Keyword), + (r'\bhalt\b', Keyword), + (r'halt(?=[(])', Keyword), + # Message sending operators + (r'(::|:|\^\^)', Operator), + # External call + (r'[{}]', Keyword), + # Logic and control + (r'(ignore|once)(?=[(])', Keyword), + (r'\brepeat\b', Keyword), + # Sorting + (r'(key)?sort(?=[(])', Keyword), + # Bitwise functors + (r'(>>|<<|/\\|\\\\|\\)', Operator), + # Predicate aliases + (r'\bas\b', Operator), + # Arithmetic evaluation + (r'\bis\b', Keyword), + # Arithmetic comparison + (r'(=:=|=\\=|<|=<|>=|>)', Operator), + # Term creation and decomposition + (r'=\.\.', Operator), + # Term unification + (r'(=|\\=)', Operator), + # Term comparison + (r'(==|\\==|@=<|@<|@>=|@>)', Operator), + # Evaluable functors + (r'(//|[-+*/])', Operator), + (r'\b(e|pi|div|mod|rem)\b', Operator), + # Other arithmetic functors + (r'\b\*\*\b', Operator), + # DCG rules + (r'-->', Operator), + # Control constructs + (r'([!;]|->)', Operator), + # Logic and control + (r'\\+', Operator), + # Mode operators + (r'[?@]', Operator), + # Existential quantifier + (r'\^', Operator), + # Strings + (r'"(\\\\|\\"|[^"])*"', String), + # Punctuation + (r'[()\[\],.|]', Text), + # Atoms + (r"[a-z][a-zA-Z0-9_]*", Text), + (r"'", String, 'quoted_atom'), + ], + + 'quoted_atom': [ + (r"''", String), + (r"'", String, '#pop'), + (r'\\([\\abfnrtv"\']|(x[a-fA-F0-9]+|[0-7]+)\\)', String.Escape), + (r"[^\\'\n]+", String), + (r'\\', String), + ], + + 'directive': [ + # Conditional compilation directives + (r'(el)?if(?=[(])', Keyword, 'root'), + (r'(e(lse|ndif))(?=[.])', Keyword, 'root'), + # Entity directives + (r'(category|object|protocol)(?=[(])', Keyword, 'entityrelations'), + (r'(end_(category|object|protocol))(?=[.])', Keyword, 'root'), + # Predicate scope directives + (r'(public|protected|private)(?=[(])', Keyword, 'root'), + # Other directives + (r'e(n(coding|sure_loaded)|xport)(?=[(])', Keyword, 'root'), +
+            (r'in(clude|itialization|fo)(?=[(])', Keyword, 'root'),
+            (r'(built_in|dynamic|synchronized|threaded)(?=[.])', Keyword, 'root'),
+            (r'(alias|d(ynamic|iscontiguous)|m(eta_(non_terminal|predicate)|ode|ultifile)|s(et_(logtalk|prolog)_flag|ynchronized))(?=[(])', Keyword, 'root'),
+            (r'op(?=[(])', Keyword, 'root'),
+            (r'(c(alls|oinductive)|module|reexport|use(s|_module))(?=[(])', Keyword, 'root'),
+            (r'[a-z][a-zA-Z0-9_]*(?=[(])', Text, 'root'),
+            (r'[a-z][a-zA-Z0-9_]*(?=[.])', Text, 'root'),
+        ],
+
+        'entityrelations': [
+            (r'(complements|extends|i(nstantiates|mp(lements|orts))|specializes)(?=[(])', Keyword),
+            # Numbers
+            (r"0'[\\]?.", Number),
+            (r'0b[01]+', Number.Bin),
+            (r'0o[0-7]+', Number.Oct),
+            (r'0x[0-9a-fA-F]+', Number.Hex),
+            (r'\d+\.?\d*((e|E)(\+|-)?\d+)?', Number),
+            # Variables
+            (r'([A-Z_][a-zA-Z0-9_]*)', Name.Variable),
+            # Atoms
+            (r"[a-z][a-zA-Z0-9_]*", Text),
+            (r"'", String, 'quoted_atom'),
+            # Strings
+            (r'"(\\\\|\\"|[^"])*"', String),
+            # End of entity-opening directive
+            (r'([)]\.)', Text, 'root'),
+            # Scope operator
+            (r'(::)', Operator),
+            # Punctuation
+            (r'[()\[\],.|]', Text),
+            # Comments
+            (r'%.*?\n', Comment),
+            (r'/\*(.|\n)*?\*/', Comment),
+            # Whitespace
+            (r'\n', Text),
+            (r'\s+', Text),
+        ]
+    }
+
+    def analyse_text(text):
+        if ':- object(' in text:
+            return 1.0
+        elif ':- protocol(' in text:
+            return 1.0
+        elif ':- category(' in text:
+            return 1.0
+        elif re.search(r'^:-\s[a-z]', text, re.M):
+            return 0.9
+        else:
+            return 0.0
diff --git a/pygments/lexers/promql.py b/pygments/lexers/promql.py
new file mode 100755
index 0000000..3cf6cc6
--- /dev/null
+++ b/pygments/lexers/promql.py
@@ -0,0 +1,183 @@
+# -*- coding: utf-8 -*-
+"""
+    pygments.lexers.promql
+    ~~~~~~~~~~~~~~~~~~~~~~
+
+    Lexer for Prometheus Query Language.
+
+    :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+from pygments.lexer import RegexLexer, bygroups, default, words
+from pygments.token import (
+    Comment,
+    Keyword,
+    Name,
+    Number,
+    Operator,
+    Punctuation,
+    String,
+    Whitespace,
+)
+
+__all__ = ["PromQLLexer"]
+
+
+class PromQLLexer(RegexLexer):
+    """
+    For `PromQL <https://prometheus.io/docs/prometheus/latest/querying/basics/>`_ queries.
+
+    For details about the grammar see:
+    https://github.com/prometheus/prometheus/tree/master/promql/parser
+
+    .. versionadded:: 2.7
+    """
+
+    name = "PromQL"
+    aliases = ["promql"]
+    filenames = ["*.promql"]
+
+    base_keywords = (
+        words(
+            (
+                "bool",
+                "by",
+                "group_left",
+                "group_right",
+                "ignoring",
+                "offset",
+                "on",
+                "without",
+            ),
+            suffix=r"\b",
+        ),
+        Keyword,
+    )
+
+    aggregator_keywords = (
+        words(
+            (
+                "sum",
+                "min",
+                "max",
+                "avg",
+                "group",
+                "stddev",
+                "stdvar",
+                "count",
+                "count_values",
+                "bottomk",
+                "topk",
+                "quantile",
+            ),
+            suffix=r"\b",
+        ),
+        Keyword,
+    )
+
+    function_keywords = (
+        words(
+            (
+                "abs",
+                "absent",
+                "absent_over_time",
+                "avg_over_time",
+                "ceil",
+                "changes",
+                "clamp_max",
+                "clamp_min",
+                "count_over_time",
+                "day_of_month",
+                "day_of_week",
+                "days_in_month",
+                "delta",
+                "deriv",
+                "exp",
+                "floor",
+                "histogram_quantile",
+                "holt_winters",
+                "hour",
+                "idelta",
+                "increase",
+                "irate",
+                "label_join",
+                "label_replace",
+                "ln",
+                "log10",
+                "log2",
+                "max_over_time",
+                "min_over_time",
+                "minute",
+                "month",
+                "predict_linear",
+                "quantile_over_time",
+                "rate",
+                "resets",
+                "round",
+                "scalar",
+                "sort",
+                "sort_desc",
+                "sqrt",
+                "stddev_over_time",
+                "stdvar_over_time",
+                "sum_over_time",
+                "time",
+                "timestamp",
+                "vector",
+                "year",
+            ),
+            suffix=r"\b",
+        ),
+        Keyword.Reserved,
+    )
+
+    tokens = {
+        "root": [
+            (r"\n", Whitespace),
+            (r"\s+", Whitespace),
+            (r",", Punctuation),
+            # Keywords
+            base_keywords,
+            aggregator_keywords,
+            function_keywords,
+            # Offsets
+            (r"[1-9][0-9]*[smhdwy]", String),
+            # Numbers
+            (r"-?[0-9]+\.[0-9]+", Number.Float),
+            (r"-?[0-9]+", Number.Integer),
+            # Comments
+            (r"#.*?$", Comment.Single),
+            # Operators
+            (r"(\+|\-|\*|\/|\%|\^)", Operator),
+            (r"==|!=|>=|<=|<|>", Operator),
+            (r"and|or|unless", Operator.Word),
+            # Metrics
+            (r"[_a-zA-Z][a-zA-Z0-9_]+", Name.Variable),
+            # Params
+            (r'(["\'])(.*?)(["\'])', bygroups(Punctuation, String, Punctuation)),
+            # Other states
+            (r"\(", Operator, "function"),
+            (r"\)", Operator),
+            (r"\{", Punctuation, "labels"),
+            (r"\[", Punctuation, "range"),
+        ],
+        "labels": [
+            (r"\}", Punctuation, "#pop"),
+            (r"\n", Whitespace),
+            (r"\s+", Whitespace),
+            (r",", Punctuation),
+            (r'([_a-zA-Z][a-zA-Z0-9_]*?)(\s*?)(=~|!=|=|!~)(\s*?)(")(.*?)(")',
+             bygroups(Name.Label, Whitespace, Operator, Whitespace,
+                      Punctuation, String, Punctuation)),
+        ],
+        "range": [
+            (r"\]", Punctuation, "#pop"),
+            (r"[1-9][0-9]*[smhdwy]", String),
+        ],
+        "function": [
+            (r"\)", Operator, "#pop"),
+            (r"\(", Operator, "#push"),
+            default("#pop"),
+        ],
+    }
diff --git a/pygments/lexers/python.py b/pygments/lexers/python.py
old mode 100644
new mode 100755
index eef2088..8cc9768
--- a/pygments/lexers/python.py
+++ b/pygments/lexers/python.py
@@ -1,1153 +1,1151 @@
-# -*- coding: utf-8 -*-
-"""
-    pygments.lexers.python
-    ~~~~~~~~~~~~~~~~~~~~~~
-
-    Lexers for Python and related languages.
-
-    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
-    :license: BSD, see LICENSE for details.
-""" - -import re - -from pygments.lexer import Lexer, RegexLexer, include, bygroups, using, \ - default, words, combined, do_insertions -from pygments.util import get_bool_opt, shebang_matches -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ - Number, Punctuation, Generic, Other, Error -from pygments import unistring as uni - -__all__ = ['PythonLexer', 'PythonConsoleLexer', 'PythonTracebackLexer', - 'Python2Lexer', 'Python2TracebackLexer', - 'CythonLexer', 'DgLexer', 'NumPyLexer'] - -line_re = re.compile('.*?\n') - - -class PythonLexer(RegexLexer): - """ - For `Python `_ source code (version 3.x). - - .. versionadded:: 0.10 - - .. versionchanged:: 2.5 - This is now the default ``PythonLexer``. It is still available as the - alias ``Python3Lexer``. - """ - - name = 'Python' - aliases = ['python', 'py', 'sage', 'python3', 'py3'] - filenames = [ - '*.py', - '*.pyw', - # Jython - '*.jy', - # Sage - '*.sage', - # SCons - '*.sc', - 'SConstruct', - 'SConscript', - # Skylark/Starlark (used by Bazel, Buck, and Pants) - '*.bzl', - 'BUCK', - 'BUILD', - 'BUILD.bazel', - 'WORKSPACE', - # Twisted Application infrastructure - '*.tac', - ] - mimetypes = ['text/x-python', 'application/x-python', - 'text/x-python3', 'application/x-python3'] - - flags = re.MULTILINE | re.UNICODE - - uni_name = "[%s][%s]*" % (uni.xid_start, uni.xid_continue) - - def innerstring_rules(ttype): - return [ - # the old style '%s' % (...) string formatting (still valid in Py3) - (r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?' - '[hlL]?[E-GXc-giorsaux%]', String.Interpol), - # the new style '{}'.format(...) string formatting - (r'\{' - r'((\w+)((\.\w+)|(\[[^\]]+\]))*)?' # field name - r'(\![sra])?' # conversion - r'(\:(.?[<>=\^])?[-+ ]?#?0?(\d+)?,?(\.\d+)?[E-GXb-gnosx%]?)?' - r'\}', String.Interpol), - - # backslashes, quotes and formatting signs must be parsed one at a time - (r'[^\\\'"%{\n]+', ttype), - (r'[\'"\\]', ttype), - # unhandled string formatting sign - (r'%|(\{{1,2})', ttype) - # newlines are an error (use "nl" state) - ] - - def fstring_rules(ttype): - return [ - # Assuming that a '}' is the closing brace after format specifier. - # Sadly, this means that we won't detect syntax error. But it's - # more important to parse correct syntax correctly, than to - # highlight invalid syntax. 
- (r'\}', String.Interpol), - (r'\{', String.Interpol, 'expr-inside-fstring'), - # backslashes, quotes and formatting signs must be parsed one at a time - (r'[^\\\'"{}\n]+', ttype), - (r'[\'"\\]', ttype), - # newlines are an error (use "nl" state) - ] - - tokens = { - 'root': [ - (r'\n', Text), - (r'^(\s*)([rRuUbB]{,2})("""(?:.|\n)*?""")', - bygroups(Text, String.Affix, String.Doc)), - (r"^(\s*)([rRuUbB]{,2})('''(?:.|\n)*?''')", - bygroups(Text, String.Affix, String.Doc)), - (r'\A#!.+$', Comment.Hashbang), - (r'#.*$', Comment.Single), - (r'\\\n', Text), - (r'\\', Text), - include('keywords'), - (r'(def)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'funcname'), - (r'(class)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'classname'), - (r'(from)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text), - 'fromimport'), - (r'(import)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text), - 'import'), - include('expr'), - ], - 'expr': [ - # raw f-strings - ('(?i)(rf|fr)(""")', - bygroups(String.Affix, String.Double), 'tdqf'), - ("(?i)(rf|fr)(''')", - bygroups(String.Affix, String.Single), 'tsqf'), - ('(?i)(rf|fr)(")', - bygroups(String.Affix, String.Double), 'dqf'), - ("(?i)(rf|fr)(')", - bygroups(String.Affix, String.Single), 'sqf'), - # non-raw f-strings - ('([fF])(""")', bygroups(String.Affix, String.Double), - combined('fstringescape', 'tdqf')), - ("([fF])(''')", bygroups(String.Affix, String.Single), - combined('fstringescape', 'tsqf')), - ('([fF])(")', bygroups(String.Affix, String.Double), - combined('fstringescape', 'dqf')), - ("([fF])(')", bygroups(String.Affix, String.Single), - combined('fstringescape', 'sqf')), - # raw strings - ('(?i)(rb|br|r)(""")', - bygroups(String.Affix, String.Double), 'tdqs'), - ("(?i)(rb|br|r)(''')", - bygroups(String.Affix, String.Single), 'tsqs'), - ('(?i)(rb|br|r)(")', - bygroups(String.Affix, String.Double), 'dqs'), - ("(?i)(rb|br|r)(')", - bygroups(String.Affix, String.Single), 'sqs'), - # non-raw strings - ('([uUbB]?)(""")', bygroups(String.Affix, String.Double), - combined('stringescape', 'tdqs')), - ("([uUbB]?)(''')", bygroups(String.Affix, String.Single), - combined('stringescape', 'tsqs')), - ('([uUbB]?)(")', bygroups(String.Affix, String.Double), - combined('stringescape', 'dqs')), - ("([uUbB]?)(')", bygroups(String.Affix, String.Single), - combined('stringescape', 'sqs')), - (r'[^\S\n]+', Text), - (r'!=|==|<<|>>|:=|[-~+/*%=<>&^|.]', Operator), - (r'[]{}:(),;[]', Punctuation), - (r'(in|is|and|or|not)\b', Operator.Word), - include('expr-keywords'), - include('builtins'), - include('magicfuncs'), - include('magicvars'), - include('name'), - include('numbers'), - ], - 'expr-inside-fstring': [ - (r'[{([]', Punctuation, 'expr-inside-fstring-inner'), - # without format specifier - (r'(=\s*)?' # debug (https://bugs.python.org/issue36817) - r'(\![sraf])?' # conversion - r'}', String.Interpol, '#pop'), - # with format specifier - # we'll catch the remaining '}' in the outer scope - (r'(=\s*)?' # debug (https://bugs.python.org/issue36817) - r'(\![sraf])?' 
# conversion - r':', String.Interpol, '#pop'), - (r'[^\S]+', Text), # allow new lines - include('expr'), - ], - 'expr-inside-fstring-inner': [ - (r'[{([]', Punctuation, 'expr-inside-fstring-inner'), - (r'[])}]', Punctuation, '#pop'), - (r'[^\S]+', Text), # allow new lines - include('expr'), - ], - 'expr-keywords': [ - # Based on https://docs.python.org/3/reference/expressions.html - (words(( - 'async for', 'await', 'else', 'for', 'if', 'lambda', - 'yield', 'yield from'), suffix=r'\b'), - Keyword), - (words(('True', 'False', 'None'), suffix=r'\b'), Keyword.Constant), - ], - 'keywords': [ - (words(( - 'assert', 'async', 'await', 'break', 'continue', 'del', 'elif', - 'else', 'except', 'finally', 'for', 'global', 'if', 'lambda', - 'pass', 'raise', 'nonlocal', 'return', 'try', 'while', 'yield', - 'yield from', 'as', 'with'), suffix=r'\b'), - Keyword), - (words(('True', 'False', 'None'), suffix=r'\b'), Keyword.Constant), - ], - 'builtins': [ - (words(( - '__import__', 'abs', 'all', 'any', 'bin', 'bool', 'bytearray', - 'bytes', 'chr', 'classmethod', 'cmp', 'compile', 'complex', - 'delattr', 'dict', 'dir', 'divmod', 'enumerate', 'eval', 'filter', - 'float', 'format', 'frozenset', 'getattr', 'globals', 'hasattr', - 'hash', 'hex', 'id', 'input', 'int', 'isinstance', 'issubclass', - 'iter', 'len', 'list', 'locals', 'map', 'max', 'memoryview', - 'min', 'next', 'object', 'oct', 'open', 'ord', 'pow', 'print', - 'property', 'range', 'repr', 'reversed', 'round', 'set', 'setattr', - 'slice', 'sorted', 'staticmethod', 'str', 'sum', 'super', 'tuple', - 'type', 'vars', 'zip'), prefix=r'(?`_ source code. - - .. versionchanged:: 2.5 - This class has been renamed from ``PythonLexer``. ``PythonLexer`` now - refers to the Python 3 variant. File name patterns like ``*.py`` have - been moved to Python 3 as well. - """ - - name = 'Python 2.x' - aliases = ['python2', 'py2'] - filenames = [] # now taken over by PythonLexer (3.x) - mimetypes = ['text/x-python2', 'application/x-python2'] - - def innerstring_rules(ttype): - return [ - # the old style '%s' % (...) string formatting - (r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?' 
- '[hlL]?[E-GXc-giorsux%]', String.Interpol), - # backslashes, quotes and formatting signs must be parsed one at a time - (r'[^\\\'"%\n]+', ttype), - (r'[\'"\\]', ttype), - # unhandled string formatting sign - (r'%', ttype), - # newlines are an error (use "nl" state) - ] - - tokens = { - 'root': [ - (r'\n', Text), - (r'^(\s*)([rRuUbB]{,2})("""(?:.|\n)*?""")', - bygroups(Text, String.Affix, String.Doc)), - (r"^(\s*)([rRuUbB]{,2})('''(?:.|\n)*?''')", - bygroups(Text, String.Affix, String.Doc)), - (r'[^\S\n]+', Text), - (r'\A#!.+$', Comment.Hashbang), - (r'#.*$', Comment.Single), - (r'[]{}:(),;[]', Punctuation), - (r'\\\n', Text), - (r'\\', Text), - (r'(in|is|and|or|not)\b', Operator.Word), - (r'!=|==|<<|>>|[-~+/*%=<>&^|.]', Operator), - include('keywords'), - (r'(def)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'funcname'), - (r'(class)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'classname'), - (r'(from)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text), - 'fromimport'), - (r'(import)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text), - 'import'), - include('builtins'), - include('magicfuncs'), - include('magicvars'), - include('backtick'), - ('([rR]|[uUbB][rR]|[rR][uUbB])(""")', - bygroups(String.Affix, String.Double), 'tdqs'), - ("([rR]|[uUbB][rR]|[rR][uUbB])(''')", - bygroups(String.Affix, String.Single), 'tsqs'), - ('([rR]|[uUbB][rR]|[rR][uUbB])(")', - bygroups(String.Affix, String.Double), 'dqs'), - ("([rR]|[uUbB][rR]|[rR][uUbB])(')", - bygroups(String.Affix, String.Single), 'sqs'), - ('([uUbB]?)(""")', bygroups(String.Affix, String.Double), - combined('stringescape', 'tdqs')), - ("([uUbB]?)(''')", bygroups(String.Affix, String.Single), - combined('stringescape', 'tsqs')), - ('([uUbB]?)(")', bygroups(String.Affix, String.Double), - combined('stringescape', 'dqs')), - ("([uUbB]?)(')", bygroups(String.Affix, String.Single), - combined('stringescape', 'sqs')), - include('name'), - include('numbers'), - ], - 'keywords': [ - (words(( - 'assert', 'break', 'continue', 'del', 'elif', 'else', 'except', - 'exec', 'finally', 'for', 'global', 'if', 'lambda', 'pass', - 'print', 'raise', 'return', 'try', 'while', 'yield', - 'yield from', 'as', 'with'), suffix=r'\b'), - Keyword), - ], - 'builtins': [ - (words(( - '__import__', 'abs', 'all', 'any', 'apply', 'basestring', 'bin', - 'bool', 'buffer', 'bytearray', 'bytes', 'callable', 'chr', 'classmethod', - 'cmp', 'coerce', 'compile', 'complex', 'delattr', 'dict', 'dir', 'divmod', - 'enumerate', 'eval', 'execfile', 'exit', 'file', 'filter', 'float', - 'frozenset', 'getattr', 'globals', 'hasattr', 'hash', 'hex', 'id', - 'input', 'int', 'intern', 'isinstance', 'issubclass', 'iter', 'len', - 'list', 'locals', 'long', 'map', 'max', 'min', 'next', 'object', - 'oct', 'open', 'ord', 'pow', 'property', 'range', 'raw_input', 'reduce', - 'reload', 'repr', 'reversed', 'round', 'set', 'setattr', 'slice', - 'sorted', 'staticmethod', 'str', 'sum', 'super', 'tuple', 'type', - 'unichr', 'unicode', 'vars', 'xrange', 'zip'), - prefix=r'(?>> a = 'foo' - >>> print a - foo - >>> 1 / 0 - Traceback (most recent call last): - File "", line 1, in - ZeroDivisionError: integer division or modulo by zero - - Additional options: - - `python3` - Use Python 3 lexer for code. Default is ``True``. - - .. versionadded:: 1.0 - .. versionchanged:: 2.5 - Now defaults to ``True``. 
- """ - name = 'Python console session' - aliases = ['pycon'] - mimetypes = ['text/x-python-doctest'] - - def __init__(self, **options): - self.python3 = get_bool_opt(options, 'python3', True) - Lexer.__init__(self, **options) - - def get_tokens_unprocessed(self, text): - if self.python3: - pylexer = PythonLexer(**self.options) - tblexer = PythonTracebackLexer(**self.options) - else: - pylexer = Python2Lexer(**self.options) - tblexer = Python2TracebackLexer(**self.options) - - curcode = '' - insertions = [] - curtb = '' - tbindex = 0 - tb = 0 - for match in line_re.finditer(text): - line = match.group() - if line.startswith(u'>>> ') or line.startswith(u'... '): - tb = 0 - insertions.append((len(curcode), - [(0, Generic.Prompt, line[:4])])) - curcode += line[4:] - elif line.rstrip() == u'...' and not tb: - # only a new >>> prompt can end an exception block - # otherwise an ellipsis in place of the traceback frames - # will be mishandled - insertions.append((len(curcode), - [(0, Generic.Prompt, u'...')])) - curcode += line[3:] - else: - if curcode: - for item in do_insertions( - insertions, pylexer.get_tokens_unprocessed(curcode)): - yield item - curcode = '' - insertions = [] - if (line.startswith(u'Traceback (most recent call last):') or - re.match(u' File "[^"]+", line \\d+\\n$', line)): - tb = 1 - curtb = line - tbindex = match.start() - elif line == 'KeyboardInterrupt\n': - yield match.start(), Name.Class, line - elif tb: - curtb += line - if not (line.startswith(' ') or line.strip() == u'...'): - tb = 0 - for i, t, v in tblexer.get_tokens_unprocessed(curtb): - yield tbindex+i, t, v - curtb = '' - else: - yield match.start(), Generic.Output, line - if curcode: - for item in do_insertions(insertions, - pylexer.get_tokens_unprocessed(curcode)): - yield item - if curtb: - for i, t, v in tblexer.get_tokens_unprocessed(curtb): - yield tbindex+i, t, v - - -class PythonTracebackLexer(RegexLexer): - """ - For Python 3.x tracebacks, with support for chained exceptions. - - .. versionadded:: 1.0 - - .. versionchanged:: 2.5 - This is now the default ``PythonTracebackLexer``. It is still available - as the alias ``Python3TracebackLexer``. - """ - - name = 'Python Traceback' - aliases = ['pytb', 'py3tb'] - filenames = ['*.pytb', '*.py3tb'] - mimetypes = ['text/x-python-traceback', 'text/x-python3-traceback'] - - tokens = { - 'root': [ - (r'\n', Text), - (r'^Traceback \(most recent call last\):\n', Generic.Traceback, 'intb'), - (r'^During handling of the above exception, another ' - r'exception occurred:\n\n', Generic.Traceback), - (r'^The above exception was the direct cause of the ' - r'following exception:\n\n', Generic.Traceback), - (r'^(?= File "[^"]+", line \d+)', Generic.Traceback, 'intb'), - (r'^.*\n', Other), - ], - 'intb': [ - (r'^( File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)', - bygroups(Text, Name.Builtin, Text, Number, Text, Name, Text)), - (r'^( File )("[^"]+")(, line )(\d+)(\n)', - bygroups(Text, Name.Builtin, Text, Number, Text)), - (r'^( )(.+)(\n)', - bygroups(Text, using(PythonLexer), Text)), - (r'^([ \t]*)(\.\.\.)(\n)', - bygroups(Text, Comment, Text)), # for doctests... - (r'^([^:]+)(: )(.+)(\n)', - bygroups(Generic.Error, Text, Name, Text), '#pop'), - (r'^([a-zA-Z_]\w*)(:?\n)', - bygroups(Generic.Error, Text), '#pop') - ], - } - - -Python3TracebackLexer = PythonTracebackLexer - - -class Python2TracebackLexer(RegexLexer): - """ - For Python tracebacks. - - .. versionadded:: 0.7 - - .. versionchanged:: 2.5 - This class has been renamed from ``PythonTracebackLexer``. 
- ``PythonTracebackLexer`` now refers to the Python 3 variant. - """ - - name = 'Python 2.x Traceback' - aliases = ['py2tb'] - filenames = ['*.py2tb'] - mimetypes = ['text/x-python2-traceback'] - - tokens = { - 'root': [ - # Cover both (most recent call last) and (innermost last) - # The optional ^C allows us to catch keyboard interrupt signals. - (r'^(\^C)?(Traceback.*\n)', - bygroups(Text, Generic.Traceback), 'intb'), - # SyntaxError starts with this. - (r'^(?= File "[^"]+", line \d+)', Generic.Traceback, 'intb'), - (r'^.*\n', Other), - ], - 'intb': [ - (r'^( File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)', - bygroups(Text, Name.Builtin, Text, Number, Text, Name, Text)), - (r'^( File )("[^"]+")(, line )(\d+)(\n)', - bygroups(Text, Name.Builtin, Text, Number, Text)), - (r'^( )(.+)(\n)', - bygroups(Text, using(Python2Lexer), Text)), - (r'^([ \t]*)(\.\.\.)(\n)', - bygroups(Text, Comment, Text)), # for doctests... - (r'^([^:]+)(: )(.+)(\n)', - bygroups(Generic.Error, Text, Name, Text), '#pop'), - (r'^([a-zA-Z_]\w*)(:?\n)', - bygroups(Generic.Error, Text), '#pop') - ], - } - - -class CythonLexer(RegexLexer): - """ - For Pyrex and `Cython `_ source code. - - .. versionadded:: 1.1 - """ - - name = 'Cython' - aliases = ['cython', 'pyx', 'pyrex'] - filenames = ['*.pyx', '*.pxd', '*.pxi'] - mimetypes = ['text/x-cython', 'application/x-cython'] - - tokens = { - 'root': [ - (r'\n', Text), - (r'^(\s*)("""(?:.|\n)*?""")', bygroups(Text, String.Doc)), - (r"^(\s*)('''(?:.|\n)*?''')", bygroups(Text, String.Doc)), - (r'[^\S\n]+', Text), - (r'#.*$', Comment), - (r'[]{}:(),;[]', Punctuation), - (r'\\\n', Text), - (r'\\', Text), - (r'(in|is|and|or|not)\b', Operator.Word), - (r'(<)([a-zA-Z0-9.?]+)(>)', - bygroups(Punctuation, Keyword.Type, Punctuation)), - (r'!=|==|<<|>>|[-~+/*%=<>&^|.?]', Operator), - (r'(from)(\d+)(<=)(\s+)(<)(\d+)(:)', - bygroups(Keyword, Number.Integer, Operator, Name, Operator, - Name, Punctuation)), - include('keywords'), - (r'(def|property)(\s+)', bygroups(Keyword, Text), 'funcname'), - (r'(cp?def)(\s+)', bygroups(Keyword, Text), 'cdef'), - # (should actually start a block with only cdefs) - (r'(cdef)(:)', bygroups(Keyword, Punctuation)), - (r'(class|struct)(\s+)', bygroups(Keyword, Text), 'classname'), - (r'(from)(\s+)', bygroups(Keyword, Text), 'fromimport'), - (r'(c?import)(\s+)', bygroups(Keyword, Text), 'import'), - include('builtins'), - include('backtick'), - ('(?:[rR]|[uU][rR]|[rR][uU])"""', String, 'tdqs'), - ("(?:[rR]|[uU][rR]|[rR][uU])'''", String, 'tsqs'), - ('(?:[rR]|[uU][rR]|[rR][uU])"', String, 'dqs'), - ("(?:[rR]|[uU][rR]|[rR][uU])'", String, 'sqs'), - ('[uU]?"""', String, combined('stringescape', 'tdqs')), - ("[uU]?'''", String, combined('stringescape', 'tsqs')), - ('[uU]?"', String, combined('stringescape', 'dqs')), - ("[uU]?'", String, combined('stringescape', 'sqs')), - include('name'), - include('numbers'), - ], - 'keywords': [ - (words(( - 'assert', 'break', 'by', 'continue', 'ctypedef', 'del', 'elif', - 'else', 'except', 'except?', 'exec', 'finally', 'for', 'fused', 'gil', - 'global', 'if', 'include', 'lambda', 'nogil', 'pass', 'print', - 'raise', 'return', 'try', 'while', 'yield', 'as', 'with'), suffix=r'\b'), - Keyword), - (r'(DEF|IF|ELIF|ELSE)\b', Comment.Preproc), - ], - 'builtins': [ - (words(( - '__import__', 'abs', 'all', 'any', 'apply', 'basestring', 'bin', - 'bool', 'buffer', 'bytearray', 'bytes', 'callable', 'chr', - 'classmethod', 'cmp', 'coerce', 'compile', 'complex', 'delattr', - 'dict', 'dir', 'divmod', 'enumerate', 'eval', 'execfile', 'exit', - 
'file', 'filter', 'float', 'frozenset', 'getattr', 'globals', - 'hasattr', 'hash', 'hex', 'id', 'input', 'int', 'intern', 'isinstance', - 'issubclass', 'iter', 'len', 'list', 'locals', 'long', 'map', 'max', - 'min', 'next', 'object', 'oct', 'open', 'ord', 'pow', 'property', - 'range', 'raw_input', 'reduce', 'reload', 'repr', 'reversed', - 'round', 'set', 'setattr', 'slice', 'sorted', 'staticmethod', - 'str', 'sum', 'super', 'tuple', 'type', 'unichr', 'unicode', 'unsigned', - 'vars', 'xrange', 'zip'), prefix=r'(?`_, - a functional and object-oriented programming language - running on the CPython 3 VM. - - .. versionadded:: 1.6 - """ - name = 'dg' - aliases = ['dg'] - filenames = ['*.dg'] - mimetypes = ['text/x-dg'] - - tokens = { - 'root': [ - (r'\s+', Text), - (r'#.*?$', Comment.Single), - - (r'(?i)0b[01]+', Number.Bin), - (r'(?i)0o[0-7]+', Number.Oct), - (r'(?i)0x[0-9a-f]+', Number.Hex), - (r'(?i)[+-]?[0-9]+\.[0-9]+(e[+-]?[0-9]+)?j?', Number.Float), - (r'(?i)[+-]?[0-9]+e[+-]?\d+j?', Number.Float), - (r'(?i)[+-]?[0-9]+j?', Number.Integer), - - (r"(?i)(br|r?b?)'''", String, combined('stringescape', 'tsqs', 'string')), - (r'(?i)(br|r?b?)"""', String, combined('stringescape', 'tdqs', 'string')), - (r"(?i)(br|r?b?)'", String, combined('stringescape', 'sqs', 'string')), - (r'(?i)(br|r?b?)"', String, combined('stringescape', 'dqs', 'string')), - - (r"`\w+'*`", Operator), - (r'\b(and|in|is|or|where)\b', Operator.Word), - (r'[!$%&*+\-./:<-@\\^|~;,]+', Operator), - - (words(( - 'bool', 'bytearray', 'bytes', 'classmethod', 'complex', 'dict', 'dict\'', - 'float', 'frozenset', 'int', 'list', 'list\'', 'memoryview', 'object', - 'property', 'range', 'set', 'set\'', 'slice', 'staticmethod', 'str', - 'super', 'tuple', 'tuple\'', 'type'), - prefix=r'(?`_ source code (version 3.x). + + .. versionadded:: 0.10 + + .. versionchanged:: 2.5 + This is now the default ``PythonLexer``. It is still available as the + alias ``Python3Lexer``. + """ + + name = 'Python' + aliases = ['python', 'py', 'sage', 'python3', 'py3'] + filenames = [ + '*.py', + '*.pyw', + # Jython + '*.jy', + # Sage + '*.sage', + # SCons + '*.sc', + 'SConstruct', + 'SConscript', + # Skylark/Starlark (used by Bazel, Buck, and Pants) + '*.bzl', + 'BUCK', + 'BUILD', + 'BUILD.bazel', + 'WORKSPACE', + # Twisted Application infrastructure + '*.tac', + ] + mimetypes = ['text/x-python', 'application/x-python', + 'text/x-python3', 'application/x-python3'] + + flags = re.MULTILINE | re.UNICODE + + uni_name = "[%s][%s]*" % (uni.xid_start, uni.xid_continue) + + def innerstring_rules(ttype): + return [ + # the old style '%s' % (...) string formatting (still valid in Py3) + (r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?' + '[hlL]?[E-GXc-giorsaux%]', String.Interpol), + # the new style '{}'.format(...) string formatting + (r'\{' + r'((\w+)((\.\w+)|(\[[^\]]+\]))*)?' # field name + r'(\![sra])?' # conversion + r'(\:(.?[<>=\^])?[-+ ]?#?0?(\d+)?,?(\.\d+)?[E-GXb-gnosx%]?)?' + r'\}', String.Interpol), + + # backslashes, quotes and formatting signs must be parsed one at a time + (r'[^\\\'"%{\n]+', ttype), + (r'[\'"\\]', ttype), + # unhandled string formatting sign + (r'%|(\{{1,2})', ttype) + # newlines are an error (use "nl" state) + ] + + def fstring_rules(ttype): + return [ + # Assuming that a '}' is the closing brace after format specifier. + # Sadly, this means that we won't detect syntax error. But it's + # more important to parse correct syntax correctly, than to + # highlight invalid syntax. 
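+            # e.g. in f"pre {x} post", "pre " and " post" are matched by the
+            # plain-text rules further down, while each brace is emitted as
+            # String.Interpol.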
+ (r'\}', String.Interpol), + (r'\{', String.Interpol, 'expr-inside-fstring'), + # backslashes, quotes and formatting signs must be parsed one at a time + (r'[^\\\'"{}\n]+', ttype), + (r'[\'"\\]', ttype), + # newlines are an error (use "nl" state) + ] + + tokens = { + 'root': [ + (r'\n', Text), + (r'^(\s*)([rRuUbB]{,2})("""(?:.|\n)*?""")', + bygroups(Text, String.Affix, String.Doc)), + (r"^(\s*)([rRuUbB]{,2})('''(?:.|\n)*?''')", + bygroups(Text, String.Affix, String.Doc)), + (r'\A#!.+$', Comment.Hashbang), + (r'#.*$', Comment.Single), + (r'\\\n', Text), + (r'\\', Text), + include('keywords'), + (r'(def)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'funcname'), + (r'(class)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'classname'), + (r'(from)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text), + 'fromimport'), + (r'(import)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text), + 'import'), + include('expr'), + ], + 'expr': [ + # raw f-strings + ('(?i)(rf|fr)(""")', + bygroups(String.Affix, String.Double), 'tdqf'), + ("(?i)(rf|fr)(''')", + bygroups(String.Affix, String.Single), 'tsqf'), + ('(?i)(rf|fr)(")', + bygroups(String.Affix, String.Double), 'dqf'), + ("(?i)(rf|fr)(')", + bygroups(String.Affix, String.Single), 'sqf'), + # non-raw f-strings + ('([fF])(""")', bygroups(String.Affix, String.Double), + combined('fstringescape', 'tdqf')), + ("([fF])(''')", bygroups(String.Affix, String.Single), + combined('fstringescape', 'tsqf')), + ('([fF])(")', bygroups(String.Affix, String.Double), + combined('fstringescape', 'dqf')), + ("([fF])(')", bygroups(String.Affix, String.Single), + combined('fstringescape', 'sqf')), + # raw strings + ('(?i)(rb|br|r)(""")', + bygroups(String.Affix, String.Double), 'tdqs'), + ("(?i)(rb|br|r)(''')", + bygroups(String.Affix, String.Single), 'tsqs'), + ('(?i)(rb|br|r)(")', + bygroups(String.Affix, String.Double), 'dqs'), + ("(?i)(rb|br|r)(')", + bygroups(String.Affix, String.Single), 'sqs'), + # non-raw strings + ('([uUbB]?)(""")', bygroups(String.Affix, String.Double), + combined('stringescape', 'tdqs')), + ("([uUbB]?)(''')", bygroups(String.Affix, String.Single), + combined('stringescape', 'tsqs')), + ('([uUbB]?)(")', bygroups(String.Affix, String.Double), + combined('stringescape', 'dqs')), + ("([uUbB]?)(')", bygroups(String.Affix, String.Single), + combined('stringescape', 'sqs')), + (r'[^\S\n]+', Text), + (r'!=|==|<<|>>|:=|[-~+/*%=<>&^|.]', Operator), + (r'[]{}:(),;[]', Punctuation), + (r'(in|is|and|or|not)\b', Operator.Word), + include('expr-keywords'), + include('builtins'), + include('magicfuncs'), + include('magicvars'), + include('name'), + include('numbers'), + ], + 'expr-inside-fstring': [ + (r'[{([]', Punctuation, 'expr-inside-fstring-inner'), + # without format specifier + (r'(=\s*)?' # debug (https://bugs.python.org/issue36817) + r'(\![sraf])?' # conversion + r'\}', String.Interpol, '#pop'), + # with format specifier + # we'll catch the remaining '}' in the outer scope + (r'(=\s*)?' # debug (https://bugs.python.org/issue36817) + r'(\![sraf])?' 
# conversion + r':', String.Interpol, '#pop'), + (r'\s+', Text), # allow new lines + include('expr'), + ], + 'expr-inside-fstring-inner': [ + (r'[{([]', Punctuation, 'expr-inside-fstring-inner'), + (r'[])}]', Punctuation, '#pop'), + (r'\s+', Text), # allow new lines + include('expr'), + ], + 'expr-keywords': [ + # Based on https://docs.python.org/3/reference/expressions.html + (words(( + 'async for', 'await', 'else', 'for', 'if', 'lambda', + 'yield', 'yield from'), suffix=r'\b'), + Keyword), + (words(('True', 'False', 'None'), suffix=r'\b'), Keyword.Constant), + ], + 'keywords': [ + (words(( + 'assert', 'async', 'await', 'break', 'continue', 'del', 'elif', + 'else', 'except', 'finally', 'for', 'global', 'if', 'lambda', + 'pass', 'raise', 'nonlocal', 'return', 'try', 'while', 'yield', + 'yield from', 'as', 'with'), suffix=r'\b'), + Keyword), + (words(('True', 'False', 'None'), suffix=r'\b'), Keyword.Constant), + ], + 'builtins': [ + (words(( + '__import__', 'abs', 'all', 'any', 'bin', 'bool', 'bytearray', + 'bytes', 'chr', 'classmethod', 'compile', 'complex', + 'delattr', 'dict', 'dir', 'divmod', 'enumerate', 'eval', 'filter', + 'float', 'format', 'frozenset', 'getattr', 'globals', 'hasattr', + 'hash', 'hex', 'id', 'input', 'int', 'isinstance', 'issubclass', + 'iter', 'len', 'list', 'locals', 'map', 'max', 'memoryview', + 'min', 'next', 'object', 'oct', 'open', 'ord', 'pow', 'print', + 'property', 'range', 'repr', 'reversed', 'round', 'set', 'setattr', + 'slice', 'sorted', 'staticmethod', 'str', 'sum', 'super', 'tuple', + 'type', 'vars', 'zip'), prefix=r'(?`_ source code. + + .. versionchanged:: 2.5 + This class has been renamed from ``PythonLexer``. ``PythonLexer`` now + refers to the Python 3 variant. File name patterns like ``*.py`` have + been moved to Python 3 as well. + """ + + name = 'Python 2.x' + aliases = ['python2', 'py2'] + filenames = [] # now taken over by PythonLexer (3.x) + mimetypes = ['text/x-python2', 'application/x-python2'] + + def innerstring_rules(ttype): + return [ + # the old style '%s' % (...) string formatting + (r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?' 
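+             # matches printf-style directives one at a time,
+             # e.g. '%s', '%(key)s' or '%-5.2f'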
+ '[hlL]?[E-GXc-giorsux%]', String.Interpol), + # backslashes, quotes and formatting signs must be parsed one at a time + (r'[^\\\'"%\n]+', ttype), + (r'[\'"\\]', ttype), + # unhandled string formatting sign + (r'%', ttype), + # newlines are an error (use "nl" state) + ] + + tokens = { + 'root': [ + (r'\n', Text), + (r'^(\s*)([rRuUbB]{,2})("""(?:.|\n)*?""")', + bygroups(Text, String.Affix, String.Doc)), + (r"^(\s*)([rRuUbB]{,2})('''(?:.|\n)*?''')", + bygroups(Text, String.Affix, String.Doc)), + (r'[^\S\n]+', Text), + (r'\A#!.+$', Comment.Hashbang), + (r'#.*$', Comment.Single), + (r'[]{}:(),;[]', Punctuation), + (r'\\\n', Text), + (r'\\', Text), + (r'(in|is|and|or|not)\b', Operator.Word), + (r'!=|==|<<|>>|[-~+/*%=<>&^|.]', Operator), + include('keywords'), + (r'(def)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'funcname'), + (r'(class)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'classname'), + (r'(from)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text), + 'fromimport'), + (r'(import)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text), + 'import'), + include('builtins'), + include('magicfuncs'), + include('magicvars'), + include('backtick'), + ('([rR]|[uUbB][rR]|[rR][uUbB])(""")', + bygroups(String.Affix, String.Double), 'tdqs'), + ("([rR]|[uUbB][rR]|[rR][uUbB])(''')", + bygroups(String.Affix, String.Single), 'tsqs'), + ('([rR]|[uUbB][rR]|[rR][uUbB])(")', + bygroups(String.Affix, String.Double), 'dqs'), + ("([rR]|[uUbB][rR]|[rR][uUbB])(')", + bygroups(String.Affix, String.Single), 'sqs'), + ('([uUbB]?)(""")', bygroups(String.Affix, String.Double), + combined('stringescape', 'tdqs')), + ("([uUbB]?)(''')", bygroups(String.Affix, String.Single), + combined('stringescape', 'tsqs')), + ('([uUbB]?)(")', bygroups(String.Affix, String.Double), + combined('stringescape', 'dqs')), + ("([uUbB]?)(')", bygroups(String.Affix, String.Single), + combined('stringescape', 'sqs')), + include('name'), + include('numbers'), + ], + 'keywords': [ + (words(( + 'assert', 'break', 'continue', 'del', 'elif', 'else', 'except', + 'exec', 'finally', 'for', 'global', 'if', 'lambda', 'pass', + 'print', 'raise', 'return', 'try', 'while', 'yield', + 'yield from', 'as', 'with'), suffix=r'\b'), + Keyword), + ], + 'builtins': [ + (words(( + '__import__', 'abs', 'all', 'any', 'apply', 'basestring', 'bin', + 'bool', 'buffer', 'bytearray', 'bytes', 'callable', 'chr', 'classmethod', + 'cmp', 'coerce', 'compile', 'complex', 'delattr', 'dict', 'dir', 'divmod', + 'enumerate', 'eval', 'execfile', 'exit', 'file', 'filter', 'float', + 'frozenset', 'getattr', 'globals', 'hasattr', 'hash', 'hex', 'id', + 'input', 'int', 'intern', 'isinstance', 'issubclass', 'iter', 'len', + 'list', 'locals', 'long', 'map', 'max', 'min', 'next', 'object', + 'oct', 'open', 'ord', 'pow', 'property', 'range', 'raw_input', 'reduce', + 'reload', 'repr', 'reversed', 'round', 'set', 'setattr', 'slice', + 'sorted', 'staticmethod', 'str', 'sum', 'super', 'tuple', 'type', + 'unichr', 'unicode', 'vars', 'xrange', 'zip'), + prefix=r'(?>> a = 'foo' + >>> print a + foo + >>> 1 / 0 + Traceback (most recent call last): + File "", line 1, in + ZeroDivisionError: integer division or modulo by zero + + Additional options: + + `python3` + Use Python 3 lexer for code. Default is ``True``. + + .. versionadded:: 1.0 + .. versionchanged:: 2.5 + Now defaults to ``True``. 
+ """ + name = 'Python console session' + aliases = ['pycon'] + mimetypes = ['text/x-python-doctest'] + + def __init__(self, **options): + self.python3 = get_bool_opt(options, 'python3', True) + Lexer.__init__(self, **options) + + def get_tokens_unprocessed(self, text): + if self.python3: + pylexer = PythonLexer(**self.options) + tblexer = PythonTracebackLexer(**self.options) + else: + pylexer = Python2Lexer(**self.options) + tblexer = Python2TracebackLexer(**self.options) + + curcode = '' + insertions = [] + curtb = '' + tbindex = 0 + tb = 0 + for match in line_re.finditer(text): + line = match.group() + if line.startswith('>>> ') or line.startswith('... '): + tb = 0 + insertions.append((len(curcode), + [(0, Generic.Prompt, line[:4])])) + curcode += line[4:] + elif line.rstrip() == '...' and not tb: + # only a new >>> prompt can end an exception block + # otherwise an ellipsis in place of the traceback frames + # will be mishandled + insertions.append((len(curcode), + [(0, Generic.Prompt, '...')])) + curcode += line[3:] + else: + if curcode: + yield from do_insertions( + insertions, pylexer.get_tokens_unprocessed(curcode)) + curcode = '' + insertions = [] + if (line.startswith('Traceback (most recent call last):') or + re.match(' File "[^"]+", line \\d+\\n$', line)): + tb = 1 + curtb = line + tbindex = match.start() + elif line == 'KeyboardInterrupt\n': + yield match.start(), Name.Class, line + elif tb: + curtb += line + if not (line.startswith(' ') or line.strip() == '...'): + tb = 0 + for i, t, v in tblexer.get_tokens_unprocessed(curtb): + yield tbindex+i, t, v + curtb = '' + else: + yield match.start(), Generic.Output, line + if curcode: + yield from do_insertions(insertions, + pylexer.get_tokens_unprocessed(curcode)) + if curtb: + for i, t, v in tblexer.get_tokens_unprocessed(curtb): + yield tbindex+i, t, v + + +class PythonTracebackLexer(RegexLexer): + """ + For Python 3.x tracebacks, with support for chained exceptions. + + .. versionadded:: 1.0 + + .. versionchanged:: 2.5 + This is now the default ``PythonTracebackLexer``. It is still available + as the alias ``Python3TracebackLexer``. + """ + + name = 'Python Traceback' + aliases = ['pytb', 'py3tb'] + filenames = ['*.pytb', '*.py3tb'] + mimetypes = ['text/x-python-traceback', 'text/x-python3-traceback'] + + tokens = { + 'root': [ + (r'\n', Text), + (r'^Traceback \(most recent call last\):\n', Generic.Traceback, 'intb'), + (r'^During handling of the above exception, another ' + r'exception occurred:\n\n', Generic.Traceback), + (r'^The above exception was the direct cause of the ' + r'following exception:\n\n', Generic.Traceback), + (r'^(?= File "[^"]+", line \d+)', Generic.Traceback, 'intb'), + (r'^.*\n', Other), + ], + 'intb': [ + (r'^( File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)', + bygroups(Text, Name.Builtin, Text, Number, Text, Name, Text)), + (r'^( File )("[^"]+")(, line )(\d+)(\n)', + bygroups(Text, Name.Builtin, Text, Number, Text)), + (r'^( )(.+)(\n)', + bygroups(Text, using(PythonLexer), Text)), + (r'^([ \t]*)(\.\.\.)(\n)', + bygroups(Text, Comment, Text)), # for doctests... + (r'^([^:]+)(: )(.+)(\n)', + bygroups(Generic.Error, Text, Name, Text), '#pop'), + (r'^([a-zA-Z_]\w*)(:?\n)', + bygroups(Generic.Error, Text), '#pop') + ], + } + + +Python3TracebackLexer = PythonTracebackLexer + + +class Python2TracebackLexer(RegexLexer): + """ + For Python tracebacks. + + .. versionadded:: 0.7 + + .. versionchanged:: 2.5 + This class has been renamed from ``PythonTracebackLexer``. 
+ ``PythonTracebackLexer`` now refers to the Python 3 variant. + """ + + name = 'Python 2.x Traceback' + aliases = ['py2tb'] + filenames = ['*.py2tb'] + mimetypes = ['text/x-python2-traceback'] + + tokens = { + 'root': [ + # Cover both (most recent call last) and (innermost last) + # The optional ^C allows us to catch keyboard interrupt signals. + (r'^(\^C)?(Traceback.*\n)', + bygroups(Text, Generic.Traceback), 'intb'), + # SyntaxError starts with this. + (r'^(?= File "[^"]+", line \d+)', Generic.Traceback, 'intb'), + (r'^.*\n', Other), + ], + 'intb': [ + (r'^( File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)', + bygroups(Text, Name.Builtin, Text, Number, Text, Name, Text)), + (r'^( File )("[^"]+")(, line )(\d+)(\n)', + bygroups(Text, Name.Builtin, Text, Number, Text)), + (r'^( )(.+)(\n)', + bygroups(Text, using(Python2Lexer), Text)), + (r'^([ \t]*)(\.\.\.)(\n)', + bygroups(Text, Comment, Text)), # for doctests... + (r'^([^:]+)(: )(.+)(\n)', + bygroups(Generic.Error, Text, Name, Text), '#pop'), + (r'^([a-zA-Z_]\w*)(:?\n)', + bygroups(Generic.Error, Text), '#pop') + ], + } + + +class CythonLexer(RegexLexer): + """ + For Pyrex and `Cython `_ source code. + + .. versionadded:: 1.1 + """ + + name = 'Cython' + aliases = ['cython', 'pyx', 'pyrex'] + filenames = ['*.pyx', '*.pxd', '*.pxi'] + mimetypes = ['text/x-cython', 'application/x-cython'] + + tokens = { + 'root': [ + (r'\n', Text), + (r'^(\s*)("""(?:.|\n)*?""")', bygroups(Text, String.Doc)), + (r"^(\s*)('''(?:.|\n)*?''')", bygroups(Text, String.Doc)), + (r'[^\S\n]+', Text), + (r'#.*$', Comment), + (r'[]{}:(),;[]', Punctuation), + (r'\\\n', Text), + (r'\\', Text), + (r'(in|is|and|or|not)\b', Operator.Word), + (r'(<)([a-zA-Z0-9.?]+)(>)', + bygroups(Punctuation, Keyword.Type, Punctuation)), + (r'!=|==|<<|>>|[-~+/*%=<>&^|.?]', Operator), + (r'(from)(\d+)(<=)(\s+)(<)(\d+)(:)', + bygroups(Keyword, Number.Integer, Operator, Name, Operator, + Name, Punctuation)), + include('keywords'), + (r'(def|property)(\s+)', bygroups(Keyword, Text), 'funcname'), + (r'(cp?def)(\s+)', bygroups(Keyword, Text), 'cdef'), + # (should actually start a block with only cdefs) + (r'(cdef)(:)', bygroups(Keyword, Punctuation)), + (r'(class|struct)(\s+)', bygroups(Keyword, Text), 'classname'), + (r'(from)(\s+)', bygroups(Keyword, Text), 'fromimport'), + (r'(c?import)(\s+)', bygroups(Keyword, Text), 'import'), + include('builtins'), + include('backtick'), + ('(?:[rR]|[uU][rR]|[rR][uU])"""', String, 'tdqs'), + ("(?:[rR]|[uU][rR]|[rR][uU])'''", String, 'tsqs'), + ('(?:[rR]|[uU][rR]|[rR][uU])"', String, 'dqs'), + ("(?:[rR]|[uU][rR]|[rR][uU])'", String, 'sqs'), + ('[uU]?"""', String, combined('stringescape', 'tdqs')), + ("[uU]?'''", String, combined('stringescape', 'tsqs')), + ('[uU]?"', String, combined('stringescape', 'dqs')), + ("[uU]?'", String, combined('stringescape', 'sqs')), + include('name'), + include('numbers'), + ], + 'keywords': [ + (words(( + 'assert', 'async', 'await', 'break', 'by', 'continue', 'ctypedef', 'del', 'elif', + 'else', 'except', 'except?', 'exec', 'finally', 'for', 'fused', 'gil', + 'global', 'if', 'include', 'lambda', 'nogil', 'pass', 'print', + 'raise', 'return', 'try', 'while', 'yield', 'as', 'with'), suffix=r'\b'), + Keyword), + (r'(DEF|IF|ELIF|ELSE)\b', Comment.Preproc), + ], + 'builtins': [ + (words(( + '__import__', 'abs', 'all', 'any', 'apply', 'basestring', 'bin', + 'bool', 'buffer', 'bytearray', 'bytes', 'callable', 'chr', + 'classmethod', 'cmp', 'coerce', 'compile', 'complex', 'delattr', + 'dict', 'dir', 'divmod', 'enumerate', 'eval', 
'execfile', 'exit', + 'file', 'filter', 'float', 'frozenset', 'getattr', 'globals', + 'hasattr', 'hash', 'hex', 'id', 'input', 'int', 'intern', 'isinstance', + 'issubclass', 'iter', 'len', 'list', 'locals', 'long', 'map', 'max', + 'min', 'next', 'object', 'oct', 'open', 'ord', 'pow', 'property', + 'range', 'raw_input', 'reduce', 'reload', 'repr', 'reversed', + 'round', 'set', 'setattr', 'slice', 'sorted', 'staticmethod', + 'str', 'sum', 'super', 'tuple', 'type', 'unichr', 'unicode', 'unsigned', + 'vars', 'xrange', 'zip'), prefix=r'(?`_, + a functional and object-oriented programming language + running on the CPython 3 VM. + + .. versionadded:: 1.6 + """ + name = 'dg' + aliases = ['dg'] + filenames = ['*.dg'] + mimetypes = ['text/x-dg'] + + tokens = { + 'root': [ + (r'\s+', Text), + (r'#.*?$', Comment.Single), + + (r'(?i)0b[01]+', Number.Bin), + (r'(?i)0o[0-7]+', Number.Oct), + (r'(?i)0x[0-9a-f]+', Number.Hex), + (r'(?i)[+-]?[0-9]+\.[0-9]+(e[+-]?[0-9]+)?j?', Number.Float), + (r'(?i)[+-]?[0-9]+e[+-]?\d+j?', Number.Float), + (r'(?i)[+-]?[0-9]+j?', Number.Integer), + + (r"(?i)(br|r?b?)'''", String, combined('stringescape', 'tsqs', 'string')), + (r'(?i)(br|r?b?)"""', String, combined('stringescape', 'tdqs', 'string')), + (r"(?i)(br|r?b?)'", String, combined('stringescape', 'sqs', 'string')), + (r'(?i)(br|r?b?)"', String, combined('stringescape', 'dqs', 'string')), + + (r"`\w+'*`", Operator), + (r'\b(and|in|is|or|where)\b', Operator.Word), + (r'[!$%&*+\-./:<-@\\^|~;,]+', Operator), + + (words(( + 'bool', 'bytearray', 'bytes', 'classmethod', 'complex', 'dict', 'dict\'', + 'float', 'frozenset', 'int', 'list', 'list\'', 'memoryview', 'object', + 'property', 'range', 'set', 'set\'', 'slice', 'staticmethod', 'str', + 'super', 'tuple', 'tuple\'', 'type'), + prefix=r'(?`_. - - Reference for implementing this: «Meta Object Facility (MOF) 2.0 - Query/View/Transformation Specification», Version 1.1 - January 2011 - (http://www.omg.org/spec/QVT/1.1/), see §8.4, «Concrete Syntax» in - particular. - - Notable tokens assignments: - - - Name.Class is assigned to the identifier following any of the following - keywords: metamodel, class, exception, primitive, enum, transformation - or library - - - Name.Function is assigned to the names of mappings and queries - - - Name.Builtin.Pseudo is assigned to the pre-defined variables 'this', - 'self' and 'result'. 
- """ - # With obvious borrowings & inspiration from the Java, Python and C lexers - - name = 'QVTO' - aliases = ['qvto', 'qvt'] - filenames = ['*.qvto'] - - tokens = { - 'root': [ - (r'\n', Text), - (r'[^\S\n]+', Text), - (r'(--|//)(\s*)(directive:)?(.*)$', - bygroups(Comment, Comment, Comment.Preproc, Comment)), - # Uncomment the following if you want to distinguish between - # '/*' and '/**', à la javadoc - # (r'/[*]{2}(.|\n)*?[*]/', Comment.Multiline), - (r'/[*](.|\n)*?[*]/', Comment.Multiline), - (r'\\\n', Text), - (r'(and|not|or|xor|##?)\b', Operator.Word), - (r'(:{1,2}=|[-+]=)\b', Operator.Word), - (r'(@|<<|>>)\b', Keyword), # stereotypes - (r'!=|<>|==|=|!->|->|>=|<=|[.]{3}|[+/*%=<>&|.~]', Operator), - (r'[]{}:(),;[]', Punctuation), - (r'(true|false|unlimited|null)\b', Keyword.Constant), - (r'(this|self|result)\b', Name.Builtin.Pseudo), - (r'(var)\b', Keyword.Declaration), - (r'(from|import)\b', Keyword.Namespace, 'fromimport'), - (r'(metamodel|class|exception|primitive|enum|transformation|' - r'library)(\s+)(\w+)', - bygroups(Keyword.Word, Text, Name.Class)), - (r'(exception)(\s+)(\w+)', - bygroups(Keyword.Word, Text, Name.Exception)), - (r'(main)\b', Name.Function), - (r'(mapping|helper|query)(\s+)', - bygroups(Keyword.Declaration, Text), 'operation'), - (r'(assert)(\s+)\b', bygroups(Keyword, Text), 'assert'), - (r'(Bag|Collection|Dict|OrderedSet|Sequence|Set|Tuple|List)\b', - Keyword.Type), - include('keywords'), - ('"', String, combined('stringescape', 'dqs')), - ("'", String, combined('stringescape', 'sqs')), - include('name'), - include('numbers'), - # (r'([a-zA-Z_]\w*)(::)([a-zA-Z_]\w*)', - # bygroups(Text, Text, Text)), - ], - - 'fromimport': [ - (r'(?:[ \t]|\\\n)+', Text), - (r'[a-zA-Z_][\w.]*', Name.Namespace), - default('#pop'), - ], - - 'operation': [ - (r'::', Text), - (r'(.*::)([a-zA-Z_]\w*)([ \t]*)(\()', - bygroups(Text, Name.Function, Text, Punctuation), '#pop') - ], - - 'assert': [ - (r'(warning|error|fatal)\b', Keyword, '#pop'), - default('#pop'), # all else: go back - ], - - 'keywords': [ - (words(( - 'abstract', 'access', 'any', 'assert', 'blackbox', 'break', - 'case', 'collect', 'collectNested', 'collectOne', 'collectselect', - 'collectselectOne', 'composes', 'compute', 'configuration', - 'constructor', 'continue', 'datatype', 'default', 'derived', - 'disjuncts', 'do', 'elif', 'else', 'end', 'endif', 'except', - 'exists', 'extends', 'forAll', 'forEach', 'forOne', 'from', 'if', - 'implies', 'in', 'inherits', 'init', 'inout', 'intermediate', - 'invresolve', 'invresolveIn', 'invresolveone', 'invresolveoneIn', - 'isUnique', 'iterate', 'late', 'let', 'literal', 'log', 'map', - 'merges', 'modeltype', 'new', 'object', 'one', 'ordered', 'out', - 'package', 'population', 'property', 'raise', 'readonly', - 'references', 'refines', 'reject', 'resolve', 'resolveIn', - 'resolveone', 'resolveoneIn', 'return', 'select', 'selectOne', - 'sortedBy', 'static', 'switch', 'tag', 'then', 'try', 'typedef', - 'unlimited', 'uses', 'when', 'where', 'while', 'with', 'xcollect', - 'xmap', 'xselect'), suffix=r'\b'), Keyword), - ], - - # There is no need to distinguish between String.Single and - # String.Double: 'strings' is factorised for 'dqs' and 'sqs' - 'strings': [ - (r'[^\\\'"\n]+', String), - # quotes, percents and backslashes must be parsed one at a time - (r'[\'"\\]', String), - ], - 'stringescape': [ - (r'\\([\\btnfr"\']|u[0-3][0-7]{2}|u[0-7]{1,2})', String.Escape) - ], - 'dqs': [ # double-quoted string - (r'"', String, '#pop'), - (r'\\\\|\\"', String.Escape), - include('strings') 
- ], - 'sqs': [ # single-quoted string - (r"'", String, '#pop'), - (r"\\\\|\\'", String.Escape), - include('strings') - ], - 'name': [ - (r'[a-zA-Z_]\w*', Name), - ], - # numbers: excerpt taken from the python lexer - 'numbers': [ - (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float), - (r'\d+[eE][+-]?[0-9]+', Number.Float), - (r'\d+', Number.Integer) - ], - } +# -*- coding: utf-8 -*- +""" + pygments.lexers.qvt + ~~~~~~~~~~~~~~~~~~~ + + Lexer for QVT Operational language. + + :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer, bygroups, include, combined, default, \ + words +from pygments.token import Text, Comment, Operator, Keyword, Punctuation, \ + Name, String, Number + +__all__ = ['QVToLexer'] + + +class QVToLexer(RegexLexer): + """ + For the `QVT Operational Mapping language `_. + + Reference for implementing this: «Meta Object Facility (MOF) 2.0 + Query/View/Transformation Specification», Version 1.1 - January 2011 + (http://www.omg.org/spec/QVT/1.1/), see §8.4, «Concrete Syntax» in + particular. + + Notable tokens assignments: + + - Name.Class is assigned to the identifier following any of the following + keywords: metamodel, class, exception, primitive, enum, transformation + or library + + - Name.Function is assigned to the names of mappings and queries + + - Name.Builtin.Pseudo is assigned to the pre-defined variables 'this', + 'self' and 'result'. + """ + # With obvious borrowings & inspiration from the Java, Python and C lexers + + name = 'QVTO' + aliases = ['qvto', 'qvt'] + filenames = ['*.qvto'] + + tokens = { + 'root': [ + (r'\n', Text), + (r'[^\S\n]+', Text), + (r'(--|//)(\s*)(directive:)?(.*)$', + bygroups(Comment, Comment, Comment.Preproc, Comment)), + # Uncomment the following if you want to distinguish between + # '/*' and '/**', à la javadoc + # (r'/[*]{2}(.|\n)*?[*]/', Comment.Multiline), + (r'/[*](.|\n)*?[*]/', Comment.Multiline), + (r'\\\n', Text), + (r'(and|not|or|xor|##?)\b', Operator.Word), + (r'(:{1,2}=|[-+]=)\b', Operator.Word), + (r'(@|<<|>>)\b', Keyword), # stereotypes + (r'!=|<>|==|=|!->|->|>=|<=|[.]{3}|[+/*%=<>&|.~]', Operator), + (r'[]{}:(),;[]', Punctuation), + (r'(true|false|unlimited|null)\b', Keyword.Constant), + (r'(this|self|result)\b', Name.Builtin.Pseudo), + (r'(var)\b', Keyword.Declaration), + (r'(from|import)\b', Keyword.Namespace, 'fromimport'), + (r'(metamodel|class|exception|primitive|enum|transformation|' + r'library)(\s+)(\w+)', + bygroups(Keyword.Word, Text, Name.Class)), + (r'(exception)(\s+)(\w+)', + bygroups(Keyword.Word, Text, Name.Exception)), + (r'(main)\b', Name.Function), + (r'(mapping|helper|query)(\s+)', + bygroups(Keyword.Declaration, Text), 'operation'), + (r'(assert)(\s+)\b', bygroups(Keyword, Text), 'assert'), + (r'(Bag|Collection|Dict|OrderedSet|Sequence|Set|Tuple|List)\b', + Keyword.Type), + include('keywords'), + ('"', String, combined('stringescape', 'dqs')), + ("'", String, combined('stringescape', 'sqs')), + include('name'), + include('numbers'), + # (r'([a-zA-Z_]\w*)(::)([a-zA-Z_]\w*)', + # bygroups(Text, Text, Text)), + ], + + 'fromimport': [ + (r'(?:[ \t]|\\\n)+', Text), + (r'[a-zA-Z_][\w.]*', Name.Namespace), + default('#pop'), + ], + + 'operation': [ + (r'::', Text), + (r'(.*::)([a-zA-Z_]\w*)([ \t]*)(\()', + bygroups(Text, Name.Function, Text, Punctuation), '#pop') + ], + + 'assert': [ + (r'(warning|error|fatal)\b', Keyword, '#pop'), + default('#pop'), # all else: go back + ], + + 'keywords': [ + (words(( + 
'abstract', 'access', 'any', 'assert', 'blackbox', 'break', + 'case', 'collect', 'collectNested', 'collectOne', 'collectselect', + 'collectselectOne', 'composes', 'compute', 'configuration', + 'constructor', 'continue', 'datatype', 'default', 'derived', + 'disjuncts', 'do', 'elif', 'else', 'end', 'endif', 'except', + 'exists', 'extends', 'forAll', 'forEach', 'forOne', 'from', 'if', + 'implies', 'in', 'inherits', 'init', 'inout', 'intermediate', + 'invresolve', 'invresolveIn', 'invresolveone', 'invresolveoneIn', + 'isUnique', 'iterate', 'late', 'let', 'literal', 'log', 'map', + 'merges', 'modeltype', 'new', 'object', 'one', 'ordered', 'out', + 'package', 'population', 'property', 'raise', 'readonly', + 'references', 'refines', 'reject', 'resolve', 'resolveIn', + 'resolveone', 'resolveoneIn', 'return', 'select', 'selectOne', + 'sortedBy', 'static', 'switch', 'tag', 'then', 'try', 'typedef', + 'unlimited', 'uses', 'when', 'where', 'while', 'with', 'xcollect', + 'xmap', 'xselect'), suffix=r'\b'), Keyword), + ], + + # There is no need to distinguish between String.Single and + # String.Double: 'strings' is factorised for 'dqs' and 'sqs' + 'strings': [ + (r'[^\\\'"\n]+', String), + # quotes, percents and backslashes must be parsed one at a time + (r'[\'"\\]', String), + ], + 'stringescape': [ + (r'\\([\\btnfr"\']|u[0-3][0-7]{2}|u[0-7]{1,2})', String.Escape) + ], + 'dqs': [ # double-quoted string + (r'"', String, '#pop'), + (r'\\\\|\\"', String.Escape), + include('strings') + ], + 'sqs': [ # single-quoted string + (r"'", String, '#pop'), + (r"\\\\|\\'", String.Escape), + include('strings') + ], + 'name': [ + (r'[a-zA-Z_]\w*', Name), + ], + # numbers: excerpt taken from the python lexer + 'numbers': [ + (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float), + (r'\d+[eE][+-]?[0-9]+', Number.Float), + (r'\d+', Number.Integer) + ], + } diff --git a/pygments/lexers/r.py b/pygments/lexers/r.py old mode 100644 new mode 100755 index 33e57b3..48866c3 --- a/pygments/lexers/r.py +++ b/pygments/lexers/r.py @@ -1,193 +1,191 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.r - ~~~~~~~~~~~~~~~~~ - - Lexers for the R/S languages. - - :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -import re - -from pygments.lexer import Lexer, RegexLexer, include, do_insertions, bygroups -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ - Number, Punctuation, Generic - -__all__ = ['RConsoleLexer', 'SLexer', 'RdLexer'] - - -line_re = re.compile('.*?\n') - - -class RConsoleLexer(Lexer): - """ - For R console transcripts or R CMD BATCH output files. - """ - - name = 'RConsole' - aliases = ['rconsole', 'rout'] - filenames = ['*.Rout'] - - def get_tokens_unprocessed(self, text): - slexer = SLexer(**self.options) - - current_code_block = '' - insertions = [] - - for match in line_re.finditer(text): - line = match.group() - if line.startswith('>') or line.startswith('+'): - # Colorize the prompt as such, - # then put rest of line into current_code_block - insertions.append((len(current_code_block), - [(0, Generic.Prompt, line[:2])])) - current_code_block += line[2:] - else: - # We have reached a non-prompt line! - # If we have stored prompt lines, need to process them first. - if current_code_block: - # Weave together the prompts and highlight code. - for item in do_insertions( - insertions, slexer.get_tokens_unprocessed(current_code_block)): - yield item - # Reset vars for next code block. 
- current_code_block = '' - insertions = [] - # Now process the actual line itself, this is output from R. - yield match.start(), Generic.Output, line - - # If we happen to end on a code block with nothing after it, need to - # process the last code block. This is neither elegant nor DRY so - # should be changed. - if current_code_block: - for item in do_insertions( - insertions, slexer.get_tokens_unprocessed(current_code_block)): - yield item - - -class SLexer(RegexLexer): - """ - For S, S-plus, and R source code. - - .. versionadded:: 0.10 - """ - - name = 'S' - aliases = ['splus', 's', 'r'] - filenames = ['*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron'] - mimetypes = ['text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', - 'text/x-R', 'text/x-r-history', 'text/x-r-profile'] - - valid_name = r'`[^`\\]*(?:\\.[^`\\]*)*`|(?:[a-zA-Z]|\.[A-Za-z_.])[\w_.]*|\.' - tokens = { - 'comments': [ - (r'#.*$', Comment.Single), - ], - 'valid_name': [ - (valid_name, Name), - ], - 'punctuation': [ - (r'\[{1,2}|\]{1,2}|\(|\)|;|,', Punctuation), - ], - 'keywords': [ - (r'(if|else|for|while|repeat|in|next|break|return|switch|function)' - r'(?![\w.])', - Keyword.Reserved), - ], - 'operators': [ - (r'<>?|-|==|<=|>=|<|>|&&?|!=|\|\|?|\?', Operator), - (r'\*|\+|\^|/|!|%[^%]*%|=|~|\$|@|:{1,3}', Operator), - ], - 'builtin_symbols': [ - (r'(NULL|NA(_(integer|real|complex|character)_)?|' - r'letters|LETTERS|Inf|TRUE|FALSE|NaN|pi|\.\.(\.|[0-9]+))' - r'(?![\w.])', - Keyword.Constant), - (r'(T|F)\b', Name.Builtin.Pseudo), - ], - 'numbers': [ - # hex number - (r'0[xX][a-fA-F0-9]+([pP][0-9]+)?[Li]?', Number.Hex), - # decimal number - (r'[+-]?([0-9]+(\.[0-9]+)?|\.[0-9]+|\.)([eE][+-]?[0-9]+)?[Li]?', - Number), - ], - 'statements': [ - include('comments'), - # whitespaces - (r'\s+', Text), - (r'\'', String, 'string_squote'), - (r'\"', String, 'string_dquote'), - include('builtin_symbols'), - include('valid_name'), - include('numbers'), - include('keywords'), - include('punctuation'), - include('operators'), - ], - 'root': [ - # calls: - (r'(%s)\s*(?=\()' % valid_name, Name.Function), - include('statements'), - # blocks: - (r'\{|\}', Punctuation), - # (r'\{', Punctuation, 'block'), - (r'.', Text), - ], - # 'block': [ - # include('statements'), - # ('\{', Punctuation, '#push'), - # ('\}', Punctuation, '#pop') - # ], - 'string_squote': [ - (r'([^\'\\]|\\.)*\'', String, '#pop'), - ], - 'string_dquote': [ - (r'([^"\\]|\\.)*"', String, '#pop'), - ], - } - - def analyse_text(text): - if re.search(r'[a-z0-9_\])\s]<-(?!-)', text): - return 0.11 - - -class RdLexer(RegexLexer): - """ - Pygments Lexer for R documentation (Rd) files - - This is a very minimal implementation, highlighting little more - than the macros. A description of Rd syntax is found in `Writing R - Extensions `_ - and `Parsing Rd files `_. - - .. versionadded:: 1.6 - """ - name = 'Rd' - aliases = ['rd'] - filenames = ['*.Rd'] - mimetypes = ['text/x-r-doc'] - - # To account for verbatim / LaTeX-like / and R-like areas - # would require parsing. 
- tokens = { - 'root': [ - # catch escaped brackets and percent sign - (r'\\[\\{}%]', String.Escape), - # comments - (r'%.*$', Comment), - # special macros with no arguments - (r'\\(?:cr|l?dots|R|tab)\b', Keyword.Constant), - # macros - (r'\\[a-zA-Z]+\b', Keyword), - # special preprocessor macros - (r'^\s*#(?:ifn?def|endif).*\b', Comment.Preproc), - # non-escaped brackets - (r'[{}]', Name.Builtin), - # everything else - (r'[^\\%\n{}]+', Text), - (r'.', Text), - ] - } +# -*- coding: utf-8 -*- +""" + pygments.lexers.r + ~~~~~~~~~~~~~~~~~ + + Lexers for the R/S languages. + + :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import Lexer, RegexLexer, include, do_insertions, bygroups +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Generic + +__all__ = ['RConsoleLexer', 'SLexer', 'RdLexer'] + + +line_re = re.compile('.*?\n') + + +class RConsoleLexer(Lexer): + """ + For R console transcripts or R CMD BATCH output files. + """ + + name = 'RConsole' + aliases = ['rconsole', 'rout'] + filenames = ['*.Rout'] + + def get_tokens_unprocessed(self, text): + slexer = SLexer(**self.options) + + current_code_block = '' + insertions = [] + + for match in line_re.finditer(text): + line = match.group() + if line.startswith('>') or line.startswith('+'): + # Colorize the prompt as such, + # then put rest of line into current_code_block + insertions.append((len(current_code_block), + [(0, Generic.Prompt, line[:2])])) + current_code_block += line[2:] + else: + # We have reached a non-prompt line! + # If we have stored prompt lines, need to process them first. + if current_code_block: + # Weave together the prompts and highlight code. + yield from do_insertions( + insertions, slexer.get_tokens_unprocessed(current_code_block)) + # Reset vars for next code block. + current_code_block = '' + insertions = [] + # Now process the actual line itself, this is output from R. + yield match.start(), Generic.Output, line + + # If we happen to end on a code block with nothing after it, need to + # process the last code block. This is neither elegant nor DRY so + # should be changed. + if current_code_block: + yield from do_insertions( + insertions, slexer.get_tokens_unprocessed(current_code_block)) + + +class SLexer(RegexLexer): + """ + For S, S-plus, and R source code. + + .. versionadded:: 0.10 + """ + + name = 'S' + aliases = ['splus', 's', 'r'] + filenames = ['*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron'] + mimetypes = ['text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', + 'text/x-R', 'text/x-r-history', 'text/x-r-profile'] + + valid_name = r'`[^`\\]*(?:\\.[^`\\]*)*`|(?:[a-zA-Z]|\.[A-Za-z_.])[\w.]*|\.' 
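# valid_name accepts three shapes of R identifier: a backtick-quoted name
# (with backslash escapes), an ordinary name starting with a letter or a
# dot followed by a letter, underscore or dot, and the lone dot. 2.7 also
# drops the redundant '_' from the trailing class ('\w' already covers
# it). A quick self-check of the pattern above (illustrative only):
#
#     import re
#     pat = re.compile(valid_name)
#     for s in ('`odd name`', '.hidden', 'x_1', '.'):
#         assert pat.fullmatch(s), s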
+ tokens = { + 'comments': [ + (r'#.*$', Comment.Single), + ], + 'valid_name': [ + (valid_name, Name), + ], + 'punctuation': [ + (r'\[{1,2}|\]{1,2}|\(|\)|;|,', Punctuation), + ], + 'keywords': [ + (r'(if|else|for|while|repeat|in|next|break|return|switch|function)' + r'(?![\w.])', + Keyword.Reserved), + ], + 'operators': [ + (r'<>?|-|==|<=|>=|<|>|&&?|!=|\|\|?|\?', Operator), + (r'\*|\+|\^|/|!|%[^%]*%|=|~|\$|@|:{1,3}', Operator), + ], + 'builtin_symbols': [ + (r'(NULL|NA(_(integer|real|complex|character)_)?|' + r'letters|LETTERS|Inf|TRUE|FALSE|NaN|pi|\.\.(\.|[0-9]+))' + r'(?![\w.])', + Keyword.Constant), + (r'(T|F)\b', Name.Builtin.Pseudo), + ], + 'numbers': [ + # hex number + (r'0[xX][a-fA-F0-9]+([pP][0-9]+)?[Li]?', Number.Hex), + # decimal number + (r'[+-]?([0-9]+(\.[0-9]+)?|\.[0-9]+|\.)([eE][+-]?[0-9]+)?[Li]?', + Number), + ], + 'statements': [ + include('comments'), + # whitespaces + (r'\s+', Text), + (r'\'', String, 'string_squote'), + (r'\"', String, 'string_dquote'), + include('builtin_symbols'), + include('valid_name'), + include('numbers'), + include('keywords'), + include('punctuation'), + include('operators'), + ], + 'root': [ + # calls: + (r'(%s)\s*(?=\()' % valid_name, Name.Function), + include('statements'), + # blocks: + (r'\{|\}', Punctuation), + # (r'\{', Punctuation, 'block'), + (r'.', Text), + ], + # 'block': [ + # include('statements'), + # ('\{', Punctuation, '#push'), + # ('\}', Punctuation, '#pop') + # ], + 'string_squote': [ + (r'([^\'\\]|\\.)*\'', String, '#pop'), + ], + 'string_dquote': [ + (r'([^"\\]|\\.)*"', String, '#pop'), + ], + } + + def analyse_text(text): + if re.search(r'[a-z0-9_\])\s]<-(?!-)', text): + return 0.11 + + +class RdLexer(RegexLexer): + """ + Pygments Lexer for R documentation (Rd) files + + This is a very minimal implementation, highlighting little more + than the macros. A description of Rd syntax is found in `Writing R + Extensions `_ + and `Parsing Rd files `_. + + .. versionadded:: 1.6 + """ + name = 'Rd' + aliases = ['rd'] + filenames = ['*.Rd'] + mimetypes = ['text/x-r-doc'] + + # To account for verbatim / LaTeX-like / and R-like areas + # would require parsing. + tokens = { + 'root': [ + # catch escaped brackets and percent sign + (r'\\[\\{}%]', String.Escape), + # comments + (r'%.*$', Comment), + # special macros with no arguments + (r'\\(?:cr|l?dots|R|tab)\b', Keyword.Constant), + # macros + (r'\\[a-zA-Z]+\b', Keyword), + # special preprocessor macros + (r'^\s*#(?:ifn?def|endif).*\b', Comment.Preproc), + # non-escaped brackets + (r'[{}]', Name.Builtin), + # everything else + (r'[^\\%\n{}]+', Text), + (r'.', Text), + ] + } diff --git a/pygments/lexers/rdf.py b/pygments/lexers/rdf.py old mode 100644 new mode 100755 index 5927a68..d0ce8ba --- a/pygments/lexers/rdf.py +++ b/pygments/lexers/rdf.py @@ -1,423 +1,423 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.rdf - ~~~~~~~~~~~~~~~~~~~ - - Lexers for semantic web and RDF query languages and markup. - - :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -import re - -from pygments.lexer import RegexLexer, bygroups, default -from pygments.token import Keyword, Punctuation, String, Number, Operator, Generic, \ - Whitespace, Name, Literal, Comment, Text - -__all__ = ['SparqlLexer', 'TurtleLexer', 'ShExCLexer'] - - -class SparqlLexer(RegexLexer): - """ - Lexer for `SPARQL `_ query language. - - .. 
versionadded:: 2.0 - """ - name = 'SPARQL' - aliases = ['sparql'] - filenames = ['*.rq', '*.sparql'] - mimetypes = ['application/sparql-query'] - - # character group definitions :: - - PN_CHARS_BASE_GRP = (u'a-zA-Z' - u'\u00c0-\u00d6' - u'\u00d8-\u00f6' - u'\u00f8-\u02ff' - u'\u0370-\u037d' - u'\u037f-\u1fff' - u'\u200c-\u200d' - u'\u2070-\u218f' - u'\u2c00-\u2fef' - u'\u3001-\ud7ff' - u'\uf900-\ufdcf' - u'\ufdf0-\ufffd') - - PN_CHARS_U_GRP = (PN_CHARS_BASE_GRP + '_') - - PN_CHARS_GRP = (PN_CHARS_U_GRP + - r'\-' + - r'0-9' + - u'\u00b7' + - u'\u0300-\u036f' + - u'\u203f-\u2040') - - HEX_GRP = '0-9A-Fa-f' - - PN_LOCAL_ESC_CHARS_GRP = r' _~.\-!$&"()*+,;=/?#@%' - - # terminal productions :: - - PN_CHARS_BASE = '[' + PN_CHARS_BASE_GRP + ']' - - PN_CHARS_U = '[' + PN_CHARS_U_GRP + ']' - - PN_CHARS = '[' + PN_CHARS_GRP + ']' - - HEX = '[' + HEX_GRP + ']' - - PN_LOCAL_ESC_CHARS = '[' + PN_LOCAL_ESC_CHARS_GRP + ']' - - IRIREF = r'<(?:[^<>"{}|^`\\\x00-\x20])*>' - - BLANK_NODE_LABEL = '_:[0-9' + PN_CHARS_U_GRP + '](?:[' + PN_CHARS_GRP + \ - '.]*' + PN_CHARS + ')?' - - PN_PREFIX = PN_CHARS_BASE + '(?:[' + PN_CHARS_GRP + '.]*' + PN_CHARS + ')?' - - VARNAME = u'[0-9' + PN_CHARS_U_GRP + '][' + PN_CHARS_U_GRP + \ - u'0-9\u00b7\u0300-\u036f\u203f-\u2040]*' - - PERCENT = '%' + HEX + HEX - - PN_LOCAL_ESC = r'\\' + PN_LOCAL_ESC_CHARS - - PLX = '(?:' + PERCENT + ')|(?:' + PN_LOCAL_ESC + ')' - - PN_LOCAL = ('(?:[' + PN_CHARS_U_GRP + ':0-9' + ']|' + PLX + ')' + - '(?:(?:[' + PN_CHARS_GRP + '.:]|' + PLX + ')*(?:[' + - PN_CHARS_GRP + ':]|' + PLX + '))?') - - EXPONENT = r'[eE][+-]?\d+' - - # Lexer token definitions :: - - tokens = { - 'root': [ - (r'\s+', Text), - # keywords :: - (r'(?i)(select|construct|describe|ask|where|filter|group\s+by|minus|' - r'distinct|reduced|from\s+named|from|order\s+by|desc|asc|limit|' - r'offset|bindings|load|clear|drop|create|add|move|copy|' - r'insert\s+data|delete\s+data|delete\s+where|delete|insert|' - r'using\s+named|using|graph|default|named|all|optional|service|' - r'silent|bind|union|not\s+in|in|as|having|to|prefix|base)\b', Keyword), - (r'(a)\b', Keyword), - # IRIs :: - ('(' + IRIREF + ')', Name.Label), - # blank nodes :: - ('(' + BLANK_NODE_LABEL + ')', Name.Label), - # # variables :: - ('[?$]' + VARNAME, Name.Variable), - # prefixed names :: - (r'(' + PN_PREFIX + r')?(\:)(' + PN_LOCAL + r')?', - bygroups(Name.Namespace, Punctuation, Name.Tag)), - # function names :: - (r'(?i)(str|lang|langmatches|datatype|bound|iri|uri|bnode|rand|abs|' - r'ceil|floor|round|concat|strlen|ucase|lcase|encode_for_uri|' - r'contains|strstarts|strends|strbefore|strafter|year|month|day|' - r'hours|minutes|seconds|timezone|tz|now|md5|sha1|sha256|sha384|' - r'sha512|coalesce|if|strlang|strdt|sameterm|isiri|isuri|isblank|' - r'isliteral|isnumeric|regex|substr|replace|exists|not\s+exists|' - r'count|sum|min|max|avg|sample|group_concat|separator)\b', - Name.Function), - # boolean literals :: - (r'(true|false)', Keyword.Constant), - # double literals :: - (r'[+\-]?(\d+\.\d*' + EXPONENT + r'|\.?\d+' + EXPONENT + ')', Number.Float), - # decimal literals :: - (r'[+\-]?(\d+\.\d*|\.\d+)', Number.Float), - # integer literals :: - (r'[+\-]?\d+', Number.Integer), - # operators :: - (r'(\|\||&&|=|\*|\-|\+|/|!=|<=|>=|!|<|>)', Operator), - # punctuation characters :: - (r'[(){}.;,:^\[\]]', Punctuation), - # line comments :: - (r'#[^\n]*', Comment), - # strings :: - (r'"""', String, 'triple-double-quoted-string'), - (r'"', String, 'single-double-quoted-string'), - (r"'''", String, 'triple-single-quoted-string'), - 
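# (each quote style gets its own state below; all four share one shape:
#  bulk text, a backslash hand-off to 'string-escape', and a closing
#  delimiter that moves to 'end-of-string' so an optional @lang tag or
#  ^^datatype suffix can be picked up before popping out)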
(r"'", String, 'single-single-quoted-string'), - ], - 'triple-double-quoted-string': [ - (r'"""', String, 'end-of-string'), - (r'[^\\]+', String), - (r'\\', String, 'string-escape'), - ], - 'single-double-quoted-string': [ - (r'"', String, 'end-of-string'), - (r'[^"\\\n]+', String), - (r'\\', String, 'string-escape'), - ], - 'triple-single-quoted-string': [ - (r"'''", String, 'end-of-string'), - (r'[^\\]+', String), - (r'\\', String.Escape, 'string-escape'), - ], - 'single-single-quoted-string': [ - (r"'", String, 'end-of-string'), - (r"[^'\\\n]+", String), - (r'\\', String, 'string-escape'), - ], - 'string-escape': [ - (r'u' + HEX + '{4}', String.Escape, '#pop'), - (r'U' + HEX + '{8}', String.Escape, '#pop'), - (r'.', String.Escape, '#pop'), - ], - 'end-of-string': [ - (r'(@)([a-zA-Z]+(?:-[a-zA-Z0-9]+)*)', - bygroups(Operator, Name.Function), '#pop:2'), - (r'\^\^', Operator, '#pop:2'), - default('#pop:2'), - ], - } - - -class TurtleLexer(RegexLexer): - """ - Lexer for `Turtle `_ data language. - - .. versionadded:: 2.1 - """ - name = 'Turtle' - aliases = ['turtle'] - filenames = ['*.ttl'] - mimetypes = ['text/turtle', 'application/x-turtle'] - - flags = re.IGNORECASE - - patterns = { - 'PNAME_NS': r'((?:[a-z][\w-]*)?\:)', # Simplified character range - 'IRIREF': r'(<[^<>"{}|^`\\\x00-\x20]*>)' - } - - # PNAME_NS PN_LOCAL (with simplified character range) - patterns['PrefixedName'] = r'%(PNAME_NS)s([a-z][\w-]*)' % patterns - - tokens = { - 'root': [ - (r'\s+', Whitespace), - - # Base / prefix - (r'(@base|BASE)(\s+)%(IRIREF)s(\s*)(\.?)' % patterns, - bygroups(Keyword, Whitespace, Name.Variable, Whitespace, - Punctuation)), - (r'(@prefix|PREFIX)(\s+)%(PNAME_NS)s(\s+)%(IRIREF)s(\s*)(\.?)' % patterns, - bygroups(Keyword, Whitespace, Name.Namespace, Whitespace, - Name.Variable, Whitespace, Punctuation)), - - # The shorthand predicate 'a' - (r'(?<=\s)a(?=\s)', Keyword.Type), - - # IRIREF - (r'%(IRIREF)s' % patterns, Name.Variable), - - # PrefixedName - (r'%(PrefixedName)s' % patterns, - bygroups(Name.Namespace, Name.Tag)), - - # Comment - (r'#[^\n]+', Comment), - - (r'\b(true|false)\b', Literal), - (r'[+\-]?\d*\.\d+', Number.Float), - (r'[+\-]?\d*(:?\.\d+)?E[+\-]?\d+', Number.Float), - (r'[+\-]?\d+', Number.Integer), - (r'[\[\](){}.;,:^]', Punctuation), - - (r'"""', String, 'triple-double-quoted-string'), - (r'"', String, 'single-double-quoted-string'), - (r"'''", String, 'triple-single-quoted-string'), - (r"'", String, 'single-single-quoted-string'), - ], - 'triple-double-quoted-string': [ - (r'"""', String, 'end-of-string'), - (r'[^\\]+', String), - (r'\\', String, 'string-escape'), - ], - 'single-double-quoted-string': [ - (r'"', String, 'end-of-string'), - (r'[^"\\\n]+', String), - (r'\\', String, 'string-escape'), - ], - 'triple-single-quoted-string': [ - (r"'''", String, 'end-of-string'), - (r'[^\\]+', String), - (r'\\', String, 'string-escape'), - ], - 'single-single-quoted-string': [ - (r"'", String, 'end-of-string'), - (r"[^'\\\n]+", String), - (r'\\', String, 'string-escape'), - ], - 'string-escape': [ - (r'.', String, '#pop'), - ], - 'end-of-string': [ - (r'(@)([a-z]+(:?-[a-z0-9]+)*)', - bygroups(Operator, Generic.Emph), '#pop:2'), - - (r'(\^\^)%(IRIREF)s' % patterns, bygroups(Operator, Generic.Emph), '#pop:2'), - (r'(\^\^)%(PrefixedName)s' % patterns, - bygroups(Operator, Generic.Emph, Generic.Emph), '#pop:2'), - - default('#pop:2'), - - ], - } - - # Turtle and Tera Term macro files share the same file extension - # but each has a recognizable and distinct syntax. 
- def analyse_text(text): - for t in ('@base ', 'BASE ', '@prefix ', 'PREFIX '): - if re.search(r'^\s*%s' % t, text): - return 0.80 - - -class ShExCLexer(RegexLexer): - """ - Lexer for `ShExC `_ shape expressions language syntax. - """ - name = 'ShExC' - aliases = ['shexc', 'shex'] - filenames = ['*.shex'] - mimetypes = ['text/shex'] - - # character group definitions :: - - PN_CHARS_BASE_GRP = (u'a-zA-Z' - u'\u00c0-\u00d6' - u'\u00d8-\u00f6' - u'\u00f8-\u02ff' - u'\u0370-\u037d' - u'\u037f-\u1fff' - u'\u200c-\u200d' - u'\u2070-\u218f' - u'\u2c00-\u2fef' - u'\u3001-\ud7ff' - u'\uf900-\ufdcf' - u'\ufdf0-\ufffd') - - PN_CHARS_U_GRP = (PN_CHARS_BASE_GRP + '_') - - PN_CHARS_GRP = (PN_CHARS_U_GRP + - r'\-' + - r'0-9' + - u'\u00b7' + - u'\u0300-\u036f' + - u'\u203f-\u2040') - - HEX_GRP = '0-9A-Fa-f' - - PN_LOCAL_ESC_CHARS_GRP = r"_~.\-!$&'()*+,;=/?#@%" - - # terminal productions :: - - PN_CHARS_BASE = '[' + PN_CHARS_BASE_GRP + ']' - - PN_CHARS_U = '[' + PN_CHARS_U_GRP + ']' - - PN_CHARS = '[' + PN_CHARS_GRP + ']' - - HEX = '[' + HEX_GRP + ']' - - PN_LOCAL_ESC_CHARS = '[' + PN_LOCAL_ESC_CHARS_GRP + ']' - - UCHAR_NO_BACKSLASH = '(?:u' + HEX + '{4}|U' + HEX + '{8})' - - UCHAR = r'\\' + UCHAR_NO_BACKSLASH - - IRIREF = r'<(?:[^\x00-\x20<>"{}|^`\\]|' + UCHAR + ')*>' - - BLANK_NODE_LABEL = '_:[0-9' + PN_CHARS_U_GRP + '](?:[' + PN_CHARS_GRP + \ - '.]*' + PN_CHARS + ')?' - - PN_PREFIX = PN_CHARS_BASE + '(?:[' + PN_CHARS_GRP + '.]*' + PN_CHARS + ')?' - - PERCENT = '%' + HEX + HEX - - PN_LOCAL_ESC = r'\\' + PN_LOCAL_ESC_CHARS - - PLX = '(?:' + PERCENT + ')|(?:' + PN_LOCAL_ESC + ')' - - PN_LOCAL = ('(?:[' + PN_CHARS_U_GRP + ':0-9' + ']|' + PLX + ')' + - '(?:(?:[' + PN_CHARS_GRP + '.:]|' + PLX + ')*(?:[' + - PN_CHARS_GRP + ':]|' + PLX + '))?') - - EXPONENT = r'[eE][+-]?\d+' - - # Lexer token definitions :: - - tokens = { - 'root': [ - (r'\s+', Text), - # keywords :: - (r'(?i)(base|prefix|start|external|' - r'literal|iri|bnode|nonliteral|length|minlength|maxlength|' - r'mininclusive|minexclusive|maxinclusive|maxexclusive|' - r'totaldigits|fractiondigits|' - r'closed|extra)\b', Keyword), - (r'(a)\b', Keyword), - # IRIs :: - ('(' + IRIREF + ')', Name.Label), - # blank nodes :: - ('(' + BLANK_NODE_LABEL + ')', Name.Label), - # prefixed names :: - (r'(' + PN_PREFIX + r')?(\:)(' + PN_LOCAL + ')?', - bygroups(Name.Namespace, Punctuation, Name.Tag)), - # boolean literals :: - (r'(true|false)', Keyword.Constant), - # double literals :: - (r'[+\-]?(\d+\.\d*' + EXPONENT + r'|\.?\d+' + EXPONENT + ')', Number.Float), - # decimal literals :: - (r'[+\-]?(\d+\.\d*|\.\d+)', Number.Float), - # integer literals :: - (r'[+\-]?\d+', Number.Integer), - # operators :: - (r'[@|$&=*+?^\-~]', Operator), - # operator keywords :: - (r'(?i)(and|or|not)\b', Operator.Word), - # punctuation characters :: - (r'[(){}.;,:^\[\]]', Punctuation), - # line comments :: - (r'#[^\n]*', Comment), - # strings :: - (r'"""', String, 'triple-double-quoted-string'), - (r'"', String, 'single-double-quoted-string'), - (r"'''", String, 'triple-single-quoted-string'), - (r"'", String, 'single-single-quoted-string'), - ], - 'triple-double-quoted-string': [ - (r'"""', String, 'end-of-string'), - (r'[^\\]+', String), - (r'\\', String, 'string-escape'), - ], - 'single-double-quoted-string': [ - (r'"', String, 'end-of-string'), - (r'[^"\\\n]+', String), - (r'\\', String, 'string-escape'), - ], - 'triple-single-quoted-string': [ - (r"'''", String, 'end-of-string'), - (r'[^\\]+', String), - (r'\\', String.Escape, 'string-escape'), - ], - 
'single-single-quoted-string': [ - (r"'", String, 'end-of-string'), - (r"[^'\\\n]+", String), - (r'\\', String, 'string-escape'), - ], - 'string-escape': [ - (UCHAR_NO_BACKSLASH, String.Escape, '#pop'), - (r'.', String.Escape, '#pop'), - ], - 'end-of-string': [ - (r'(@)([a-zA-Z]+(?:-[a-zA-Z0-9]+)*)', - bygroups(Operator, Name.Function), '#pop:2'), - (r'\^\^', Operator, '#pop:2'), - default('#pop:2'), - ], - } +# -*- coding: utf-8 -*- +""" + pygments.lexers.rdf + ~~~~~~~~~~~~~~~~~~~ + + Lexers for semantic web and RDF query languages and markup. + + :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, bygroups, default +from pygments.token import Keyword, Punctuation, String, Number, Operator, Generic, \ + Whitespace, Name, Literal, Comment, Text + +__all__ = ['SparqlLexer', 'TurtleLexer', 'ShExCLexer'] + + +class SparqlLexer(RegexLexer): + """ + Lexer for `SPARQL `_ query language. + + .. versionadded:: 2.0 + """ + name = 'SPARQL' + aliases = ['sparql'] + filenames = ['*.rq', '*.sparql'] + mimetypes = ['application/sparql-query'] + + # character group definitions :: + + PN_CHARS_BASE_GRP = ('a-zA-Z' + '\u00c0-\u00d6' + '\u00d8-\u00f6' + '\u00f8-\u02ff' + '\u0370-\u037d' + '\u037f-\u1fff' + '\u200c-\u200d' + '\u2070-\u218f' + '\u2c00-\u2fef' + '\u3001-\ud7ff' + '\uf900-\ufdcf' + '\ufdf0-\ufffd') + + PN_CHARS_U_GRP = (PN_CHARS_BASE_GRP + '_') + + PN_CHARS_GRP = (PN_CHARS_U_GRP + + r'\-' + + r'0-9' + + '\u00b7' + + '\u0300-\u036f' + + '\u203f-\u2040') + + HEX_GRP = '0-9A-Fa-f' + + PN_LOCAL_ESC_CHARS_GRP = r' _~.\-!$&"()*+,;=/?#@%' + + # terminal productions :: + + PN_CHARS_BASE = '[' + PN_CHARS_BASE_GRP + ']' + + PN_CHARS_U = '[' + PN_CHARS_U_GRP + ']' + + PN_CHARS = '[' + PN_CHARS_GRP + ']' + + HEX = '[' + HEX_GRP + ']' + + PN_LOCAL_ESC_CHARS = '[' + PN_LOCAL_ESC_CHARS_GRP + ']' + + IRIREF = r'<(?:[^<>"{}|^`\\\x00-\x20])*>' + + BLANK_NODE_LABEL = '_:[0-9' + PN_CHARS_U_GRP + '](?:[' + PN_CHARS_GRP + \ + '.]*' + PN_CHARS + ')?' + + PN_PREFIX = PN_CHARS_BASE + '(?:[' + PN_CHARS_GRP + '.]*' + PN_CHARS + ')?' 
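# These terminals track the SPARQL grammar: PN_PREFIX is the piece before
# the colon of a prefixed name, PN_LOCAL (built below) the piece after
# it, and PLX folds in percent-encodings plus backslash escapes. Composed
# the way the 'prefixed names' rule composes them, they behave like this
# (an illustrative sketch):
#
#     import re
#     m = re.compile('(' + PN_PREFIX + r')?(\:)(' + PN_LOCAL + ')?'
#                    ).match('foaf:name')
#     # m.groups() == ('foaf', ':', 'name')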
+ + VARNAME = '[0-9' + PN_CHARS_U_GRP + '][' + PN_CHARS_U_GRP + \ + '0-9\u00b7\u0300-\u036f\u203f-\u2040]*' + + PERCENT = '%' + HEX + HEX + + PN_LOCAL_ESC = r'\\' + PN_LOCAL_ESC_CHARS + + PLX = '(?:' + PERCENT + ')|(?:' + PN_LOCAL_ESC + ')' + + PN_LOCAL = ('(?:[' + PN_CHARS_U_GRP + ':0-9' + ']|' + PLX + ')' + + '(?:(?:[' + PN_CHARS_GRP + '.:]|' + PLX + ')*(?:[' + + PN_CHARS_GRP + ':]|' + PLX + '))?') + + EXPONENT = r'[eE][+-]?\d+' + + # Lexer token definitions :: + + tokens = { + 'root': [ + (r'\s+', Text), + # keywords :: + (r'(?i)(select|construct|describe|ask|where|filter|group\s+by|minus|' + r'distinct|reduced|from\s+named|from|order\s+by|desc|asc|limit|' + r'offset|bindings|load|clear|drop|create|add|move|copy|' + r'insert\s+data|delete\s+data|delete\s+where|delete|insert|' + r'using\s+named|using|graph|default|named|all|optional|service|' + r'silent|bind|union|not\s+in|in|as|having|to|prefix|base)\b', Keyword), + (r'(a)\b', Keyword), + # IRIs :: + ('(' + IRIREF + ')', Name.Label), + # blank nodes :: + ('(' + BLANK_NODE_LABEL + ')', Name.Label), + # # variables :: + ('[?$]' + VARNAME, Name.Variable), + # prefixed names :: + (r'(' + PN_PREFIX + r')?(\:)(' + PN_LOCAL + r')?', + bygroups(Name.Namespace, Punctuation, Name.Tag)), + # function names :: + (r'(?i)(str|lang|langmatches|datatype|bound|iri|uri|bnode|rand|abs|' + r'ceil|floor|round|concat|strlen|ucase|lcase|encode_for_uri|' + r'contains|strstarts|strends|strbefore|strafter|year|month|day|' + r'hours|minutes|seconds|timezone|tz|now|md5|sha1|sha256|sha384|' + r'sha512|coalesce|if|strlang|strdt|sameterm|isiri|isuri|isblank|' + r'isliteral|isnumeric|regex|substr|replace|exists|not\s+exists|' + r'count|sum|min|max|avg|sample|group_concat|separator)\b', + Name.Function), + # boolean literals :: + (r'(true|false)', Keyword.Constant), + # double literals :: + (r'[+\-]?(\d+\.\d*' + EXPONENT + r'|\.?\d+' + EXPONENT + ')', Number.Float), + # decimal literals :: + (r'[+\-]?(\d+\.\d*|\.\d+)', Number.Float), + # integer literals :: + (r'[+\-]?\d+', Number.Integer), + # operators :: + (r'(\|\||&&|=|\*|\-|\+|/|!=|<=|>=|!|<|>)', Operator), + # punctuation characters :: + (r'[(){}.;,:^\[\]]', Punctuation), + # line comments :: + (r'#[^\n]*', Comment), + # strings :: + (r'"""', String, 'triple-double-quoted-string'), + (r'"', String, 'single-double-quoted-string'), + (r"'''", String, 'triple-single-quoted-string'), + (r"'", String, 'single-single-quoted-string'), + ], + 'triple-double-quoted-string': [ + (r'"""', String, 'end-of-string'), + (r'[^\\]+', String), + (r'\\', String, 'string-escape'), + ], + 'single-double-quoted-string': [ + (r'"', String, 'end-of-string'), + (r'[^"\\\n]+', String), + (r'\\', String, 'string-escape'), + ], + 'triple-single-quoted-string': [ + (r"'''", String, 'end-of-string'), + (r'[^\\]+', String), + (r'\\', String.Escape, 'string-escape'), + ], + 'single-single-quoted-string': [ + (r"'", String, 'end-of-string'), + (r"[^'\\\n]+", String), + (r'\\', String, 'string-escape'), + ], + 'string-escape': [ + (r'u' + HEX + '{4}', String.Escape, '#pop'), + (r'U' + HEX + '{8}', String.Escape, '#pop'), + (r'.', String.Escape, '#pop'), + ], + 'end-of-string': [ + (r'(@)([a-zA-Z]+(?:-[a-zA-Z0-9]+)*)', + bygroups(Operator, Name.Function), '#pop:2'), + (r'\^\^', Operator, '#pop:2'), + default('#pop:2'), + ], + } + + +class TurtleLexer(RegexLexer): + """ + Lexer for `Turtle `_ data language. + + .. 
versionadded:: 2.1 + """ + name = 'Turtle' + aliases = ['turtle'] + filenames = ['*.ttl'] + mimetypes = ['text/turtle', 'application/x-turtle'] + + flags = re.IGNORECASE + + patterns = { + 'PNAME_NS': r'((?:[a-z][\w-]*)?\:)', # Simplified character range + 'IRIREF': r'(<[^<>"{}|^`\\\x00-\x20]*>)' + } + + # PNAME_NS PN_LOCAL (with simplified character range) + patterns['PrefixedName'] = r'%(PNAME_NS)s([a-z][\w-]*)' % patterns + + tokens = { + 'root': [ + (r'\s+', Whitespace), + + # Base / prefix + (r'(@base|BASE)(\s+)%(IRIREF)s(\s*)(\.?)' % patterns, + bygroups(Keyword, Whitespace, Name.Variable, Whitespace, + Punctuation)), + (r'(@prefix|PREFIX)(\s+)%(PNAME_NS)s(\s+)%(IRIREF)s(\s*)(\.?)' % patterns, + bygroups(Keyword, Whitespace, Name.Namespace, Whitespace, + Name.Variable, Whitespace, Punctuation)), + + # The shorthand predicate 'a' + (r'(?<=\s)a(?=\s)', Keyword.Type), + + # IRIREF + (r'%(IRIREF)s' % patterns, Name.Variable), + + # PrefixedName + (r'%(PrefixedName)s' % patterns, + bygroups(Name.Namespace, Name.Tag)), + + # Comment + (r'#[^\n]+', Comment), + + (r'\b(true|false)\b', Literal), + (r'[+\-]?\d*\.\d+', Number.Float), + (r'[+\-]?\d*(:?\.\d+)?E[+\-]?\d+', Number.Float), + (r'[+\-]?\d+', Number.Integer), + (r'[\[\](){}.;,:^]', Punctuation), + + (r'"""', String, 'triple-double-quoted-string'), + (r'"', String, 'single-double-quoted-string'), + (r"'''", String, 'triple-single-quoted-string'), + (r"'", String, 'single-single-quoted-string'), + ], + 'triple-double-quoted-string': [ + (r'"""', String, 'end-of-string'), + (r'[^\\]+', String), + (r'\\', String, 'string-escape'), + ], + 'single-double-quoted-string': [ + (r'"', String, 'end-of-string'), + (r'[^"\\\n]+', String), + (r'\\', String, 'string-escape'), + ], + 'triple-single-quoted-string': [ + (r"'''", String, 'end-of-string'), + (r'[^\\]+', String), + (r'\\', String, 'string-escape'), + ], + 'single-single-quoted-string': [ + (r"'", String, 'end-of-string'), + (r"[^'\\\n]+", String), + (r'\\', String, 'string-escape'), + ], + 'string-escape': [ + (r'.', String, '#pop'), + ], + 'end-of-string': [ + (r'(@)([a-z]+(:?-[a-z0-9]+)*)', + bygroups(Operator, Generic.Emph), '#pop:2'), + + (r'(\^\^)%(IRIREF)s' % patterns, bygroups(Operator, Generic.Emph), '#pop:2'), + (r'(\^\^)%(PrefixedName)s' % patterns, + bygroups(Operator, Generic.Emph, Generic.Emph), '#pop:2'), + + default('#pop:2'), + + ], + } + + # Turtle and Tera Term macro files share the same file extension + # but each has a recognizable and distinct syntax. + def analyse_text(text): + for t in ('@base ', 'BASE ', '@prefix ', 'PREFIX '): + if re.search(r'^\s*%s' % t, text): + return 0.80 + + +class ShExCLexer(RegexLexer): + """ + Lexer for `ShExC `_ shape expressions language syntax. 
+ """ + name = 'ShExC' + aliases = ['shexc', 'shex'] + filenames = ['*.shex'] + mimetypes = ['text/shex'] + + # character group definitions :: + + PN_CHARS_BASE_GRP = ('a-zA-Z' + '\u00c0-\u00d6' + '\u00d8-\u00f6' + '\u00f8-\u02ff' + '\u0370-\u037d' + '\u037f-\u1fff' + '\u200c-\u200d' + '\u2070-\u218f' + '\u2c00-\u2fef' + '\u3001-\ud7ff' + '\uf900-\ufdcf' + '\ufdf0-\ufffd') + + PN_CHARS_U_GRP = (PN_CHARS_BASE_GRP + '_') + + PN_CHARS_GRP = (PN_CHARS_U_GRP + + r'\-' + + r'0-9' + + '\u00b7' + + '\u0300-\u036f' + + '\u203f-\u2040') + + HEX_GRP = '0-9A-Fa-f' + + PN_LOCAL_ESC_CHARS_GRP = r"_~.\-!$&'()*+,;=/?#@%" + + # terminal productions :: + + PN_CHARS_BASE = '[' + PN_CHARS_BASE_GRP + ']' + + PN_CHARS_U = '[' + PN_CHARS_U_GRP + ']' + + PN_CHARS = '[' + PN_CHARS_GRP + ']' + + HEX = '[' + HEX_GRP + ']' + + PN_LOCAL_ESC_CHARS = '[' + PN_LOCAL_ESC_CHARS_GRP + ']' + + UCHAR_NO_BACKSLASH = '(?:u' + HEX + '{4}|U' + HEX + '{8})' + + UCHAR = r'\\' + UCHAR_NO_BACKSLASH + + IRIREF = r'<(?:[^\x00-\x20<>"{}|^`\\]|' + UCHAR + ')*>' + + BLANK_NODE_LABEL = '_:[0-9' + PN_CHARS_U_GRP + '](?:[' + PN_CHARS_GRP + \ + '.]*' + PN_CHARS + ')?' + + PN_PREFIX = PN_CHARS_BASE + '(?:[' + PN_CHARS_GRP + '.]*' + PN_CHARS + ')?' + + PERCENT = '%' + HEX + HEX + + PN_LOCAL_ESC = r'\\' + PN_LOCAL_ESC_CHARS + + PLX = '(?:' + PERCENT + ')|(?:' + PN_LOCAL_ESC + ')' + + PN_LOCAL = ('(?:[' + PN_CHARS_U_GRP + ':0-9' + ']|' + PLX + ')' + + '(?:(?:[' + PN_CHARS_GRP + '.:]|' + PLX + ')*(?:[' + + PN_CHARS_GRP + ':]|' + PLX + '))?') + + EXPONENT = r'[eE][+-]?\d+' + + # Lexer token definitions :: + + tokens = { + 'root': [ + (r'\s+', Text), + # keywords :: + (r'(?i)(base|prefix|start|external|' + r'literal|iri|bnode|nonliteral|length|minlength|maxlength|' + r'mininclusive|minexclusive|maxinclusive|maxexclusive|' + r'totaldigits|fractiondigits|' + r'closed|extra)\b', Keyword), + (r'(a)\b', Keyword), + # IRIs :: + ('(' + IRIREF + ')', Name.Label), + # blank nodes :: + ('(' + BLANK_NODE_LABEL + ')', Name.Label), + # prefixed names :: + (r'(' + PN_PREFIX + r')?(\:)(' + PN_LOCAL + ')?', + bygroups(Name.Namespace, Punctuation, Name.Tag)), + # boolean literals :: + (r'(true|false)', Keyword.Constant), + # double literals :: + (r'[+\-]?(\d+\.\d*' + EXPONENT + r'|\.?\d+' + EXPONENT + ')', Number.Float), + # decimal literals :: + (r'[+\-]?(\d+\.\d*|\.\d+)', Number.Float), + # integer literals :: + (r'[+\-]?\d+', Number.Integer), + # operators :: + (r'[@|$&=*+?^\-~]', Operator), + # operator keywords :: + (r'(?i)(and|or|not)\b', Operator.Word), + # punctuation characters :: + (r'[(){}.;,:^\[\]]', Punctuation), + # line comments :: + (r'#[^\n]*', Comment), + # strings :: + (r'"""', String, 'triple-double-quoted-string'), + (r'"', String, 'single-double-quoted-string'), + (r"'''", String, 'triple-single-quoted-string'), + (r"'", String, 'single-single-quoted-string'), + ], + 'triple-double-quoted-string': [ + (r'"""', String, 'end-of-string'), + (r'[^\\]+', String), + (r'\\', String, 'string-escape'), + ], + 'single-double-quoted-string': [ + (r'"', String, 'end-of-string'), + (r'[^"\\\n]+', String), + (r'\\', String, 'string-escape'), + ], + 'triple-single-quoted-string': [ + (r"'''", String, 'end-of-string'), + (r'[^\\]+', String), + (r'\\', String.Escape, 'string-escape'), + ], + 'single-single-quoted-string': [ + (r"'", String, 'end-of-string'), + (r"[^'\\\n]+", String), + (r'\\', String, 'string-escape'), + ], + 'string-escape': [ + (UCHAR_NO_BACKSLASH, String.Escape, '#pop'), + (r'.', String.Escape, '#pop'), + ], + 'end-of-string': [ + 
(r'(@)([a-zA-Z]+(?:-[a-zA-Z0-9]+)*)', + bygroups(Operator, Name.Function), '#pop:2'), + (r'\^\^', Operator, '#pop:2'), + default('#pop:2'), + ], + } diff --git a/pygments/lexers/rebol.py b/pygments/lexers/rebol.py old mode 100644 new mode 100755 index 1b3d90f..1f30d7b --- a/pygments/lexers/rebol.py +++ b/pygments/lexers/rebol.py @@ -1,431 +1,431 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.rebol - ~~~~~~~~~~~~~~~~~~~~~ - - Lexers for the REBOL and related languages. - - :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -import re - -from pygments.lexer import RegexLexer, bygroups -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ - Number, Generic, Whitespace - -__all__ = ['RebolLexer', 'RedLexer'] - - -class RebolLexer(RegexLexer): - """ - A `REBOL `_ lexer. - - .. versionadded:: 1.1 - """ - name = 'REBOL' - aliases = ['rebol'] - filenames = ['*.r', '*.r3', '*.reb'] - mimetypes = ['text/x-rebol'] - - flags = re.IGNORECASE | re.MULTILINE - - escape_re = r'(?:\^\([0-9a-f]{1,4}\)*)' - - def word_callback(lexer, match): - word = match.group() - - if re.match(".*:$", word): - yield match.start(), Generic.Subheading, word - elif re.match( - r'(native|alias|all|any|as-string|as-binary|bind|bound\?|case|' - r'catch|checksum|comment|debase|dehex|exclude|difference|disarm|' - r'either|else|enbase|foreach|remove-each|form|free|get|get-env|if|' - r'in|intersect|loop|minimum-of|maximum-of|mold|new-line|' - r'new-line\?|not|now|prin|print|reduce|compose|construct|repeat|' - r'reverse|save|script\?|set|shift|switch|throw|to-hex|trace|try|' - r'type\?|union|unique|unless|unprotect|unset|until|use|value\?|' - r'while|compress|decompress|secure|open|close|read|read-io|' - r'write-io|write|update|query|wait|input\?|exp|log-10|log-2|' - r'log-e|square-root|cosine|sine|tangent|arccosine|arcsine|' - r'arctangent|protect|lowercase|uppercase|entab|detab|connected\?|' - r'browse|launch|stats|get-modes|set-modes|to-local-file|' - r'to-rebol-file|encloak|decloak|create-link|do-browser|bind\?|' - r'hide|draw|show|size-text|textinfo|offset-to-caret|' - r'caret-to-offset|local-request-file|rgb-to-hsv|hsv-to-rgb|' - r'crypt-strength\?|dh-make-key|dh-generate-key|dh-compute-key|' - r'dsa-make-key|dsa-generate-key|dsa-make-signature|' - r'dsa-verify-signature|rsa-make-key|rsa-generate-key|' - r'rsa-encrypt)$', word): - yield match.start(), Name.Builtin, word - elif re.match( - r'(add|subtract|multiply|divide|remainder|power|and~|or~|xor~|' - r'minimum|maximum|negate|complement|absolute|random|head|tail|' - r'next|back|skip|at|pick|first|second|third|fourth|fifth|sixth|' - r'seventh|eighth|ninth|tenth|last|path|find|select|make|to|copy\*|' - r'insert|remove|change|poke|clear|trim|sort|min|max|abs|cp|' - r'copy)$', word): - yield match.start(), Name.Function, word - elif re.match( - r'(error|source|input|license|help|install|echo|Usage|with|func|' - r'throw-on-error|function|does|has|context|probe|\?\?|as-pair|' - r'mod|modulo|round|repend|about|set-net|append|join|rejoin|reform|' - r'remold|charset|array|replace|move|extract|forskip|forall|alter|' - r'first+|also|take|for|forever|dispatch|attempt|what-dir|' - r'change-dir|clean-path|list-dir|dirize|rename|split-path|delete|' - r'make-dir|delete-dir|in-dir|confirm|dump-obj|upgrade|what|' - r'build-tag|process-source|build-markup|decode-cgi|read-cgi|' - r'write-user|save-user|set-user-name|protect-system|parse-xml|' - 
r'cvs-date|cvs-version|do-boot|get-net-info|desktop|layout|' - r'scroll-para|get-face|alert|set-face|uninstall|unfocus|' - r'request-dir|center-face|do-events|net-error|decode-url|' - r'parse-header|parse-header-date|parse-email-addrs|import-email|' - r'send|build-attach-body|resend|show-popup|hide-popup|open-events|' - r'find-key-face|do-face|viewtop|confine|find-window|' - r'insert-event-func|remove-event-func|inform|dump-pane|dump-face|' - r'flag-face|deflag-face|clear-fields|read-net|vbug|path-thru|' - r'read-thru|load-thru|do-thru|launch-thru|load-image|' - r'request-download|do-face-alt|set-font|set-para|get-style|' - r'set-style|make-face|stylize|choose|hilight-text|hilight-all|' - r'unlight-text|focus|scroll-drag|clear-face|reset-face|scroll-face|' - r'resize-face|load-stock|load-stock-block|notify|request|flash|' - r'request-color|request-pass|request-text|request-list|' - r'request-date|request-file|dbug|editor|link-relative-path|' - r'emailer|parse-error)$', word): - yield match.start(), Keyword.Namespace, word - elif re.match( - r'(halt|quit|do|load|q|recycle|call|run|ask|parse|view|unview|' - r'return|exit|break)$', word): - yield match.start(), Name.Exception, word - elif re.match('REBOL$', word): - yield match.start(), Generic.Heading, word - elif re.match("to-.*", word): - yield match.start(), Keyword, word - elif re.match(r'(\+|-|\*|/|//|\*\*|and|or|xor|=\?|=|==|<>|<|>|<=|>=)$', - word): - yield match.start(), Operator, word - elif re.match(r".*\?$", word): - yield match.start(), Keyword, word - elif re.match(r".*\!$", word): - yield match.start(), Keyword.Type, word - elif re.match("'.*", word): - yield match.start(), Name.Variable.Instance, word # lit-word - elif re.match("#.*", word): - yield match.start(), Name.Label, word # issue - elif re.match("%.*", word): - yield match.start(), Name.Decorator, word # file - else: - yield match.start(), Name.Variable, word - - tokens = { - 'root': [ - (r'[^R]+', Comment), - (r'REBOL\s+\[', Generic.Strong, 'script'), - (r'R', Comment) - ], - 'script': [ - (r'\s+', Text), - (r'#"', String.Char, 'char'), - (r'#\{[0-9a-f]*\}', Number.Hex), - (r'2#\{', Number.Hex, 'bin2'), - (r'64#\{[0-9a-z+/=\s]*\}', Number.Hex), - (r'"', String, 'string'), - (r'\{', String, 'string2'), - (r';#+.*\n', Comment.Special), - (r';\*+.*\n', Comment.Preproc), - (r';.*\n', Comment), - (r'%"', Name.Decorator, 'stringFile'), - (r'%[^(^{")\s\[\]]+', Name.Decorator), - (r'[+-]?([a-z]{1,3})?\$\d+(\.\d+)?', Number.Float), # money - (r'[+-]?\d+\:\d+(\:\d+)?(\.\d+)?', String.Other), # time - (r'\d+[\-/][0-9a-z]+[\-/]\d+(\/\d+\:\d+((\:\d+)?' 
- r'([.\d+]?([+-]?\d+:\d+)?)?)?)?', String.Other), # date - (r'\d+(\.\d+)+\.\d+', Keyword.Constant), # tuple - (r'\d+X\d+', Keyword.Constant), # pair - (r'[+-]?\d+(\'\d+)?([.,]\d*)?E[+-]?\d+', Number.Float), - (r'[+-]?\d+(\'\d+)?[.,]\d*', Number.Float), - (r'[+-]?\d+(\'\d+)?', Number), - (r'[\[\]()]', Generic.Strong), - (r'[a-z]+[^(^{"\s:)]*://[^(^{"\s)]*', Name.Decorator), # url - (r'mailto:[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator), # url - (r'[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator), # email - (r'comment\s"', Comment, 'commentString1'), - (r'comment\s\{', Comment, 'commentString2'), - (r'comment\s\[', Comment, 'commentBlock'), - (r'comment\s[^(\s{"\[]+', Comment), - (r'/[^(^{")\s/[\]]*', Name.Attribute), - (r'([^(^{")\s/[\]]+)(?=[:({"\s/\[\]])', word_callback), - (r'<[\w:.-]*>', Name.Tag), - (r'<[^(<>\s")]+', Name.Tag, 'tag'), - (r'([^(^{")\s]+)', Text), - ], - 'string': [ - (r'[^(^")]+', String), - (escape_re, String.Escape), - (r'[(|)]+', String), - (r'\^.', String.Escape), - (r'"', String, '#pop'), - ], - 'string2': [ - (r'[^(^{})]+', String), - (escape_re, String.Escape), - (r'[(|)]+', String), - (r'\^.', String.Escape), - (r'\{', String, '#push'), - (r'\}', String, '#pop'), - ], - 'stringFile': [ - (r'[^(^")]+', Name.Decorator), - (escape_re, Name.Decorator), - (r'\^.', Name.Decorator), - (r'"', Name.Decorator, '#pop'), - ], - 'char': [ - (escape_re + '"', String.Char, '#pop'), - (r'\^."', String.Char, '#pop'), - (r'."', String.Char, '#pop'), - ], - 'tag': [ - (escape_re, Name.Tag), - (r'"', Name.Tag, 'tagString'), - (r'[^(<>\r\n")]+', Name.Tag), - (r'>', Name.Tag, '#pop'), - ], - 'tagString': [ - (r'[^(^")]+', Name.Tag), - (escape_re, Name.Tag), - (r'[(|)]+', Name.Tag), - (r'\^.', Name.Tag), - (r'"', Name.Tag, '#pop'), - ], - 'tuple': [ - (r'(\d+\.)+', Keyword.Constant), - (r'\d+', Keyword.Constant, '#pop'), - ], - 'bin2': [ - (r'\s+', Number.Hex), - (r'([01]\s*){8}', Number.Hex), - (r'\}', Number.Hex, '#pop'), - ], - 'commentString1': [ - (r'[^(^")]+', Comment), - (escape_re, Comment), - (r'[(|)]+', Comment), - (r'\^.', Comment), - (r'"', Comment, '#pop'), - ], - 'commentString2': [ - (r'[^(^{})]+', Comment), - (escape_re, Comment), - (r'[(|)]+', Comment), - (r'\^.', Comment), - (r'\{', Comment, '#push'), - (r'\}', Comment, '#pop'), - ], - 'commentBlock': [ - (r'\[', Comment, '#push'), - (r'\]', Comment, '#pop'), - (r'"', Comment, "commentString1"), - (r'\{', Comment, "commentString2"), - (r'[^(\[\]"{)]+', Comment), - ], - } - - def analyse_text(text): - """ - Check if code contains REBOL header and so it probably not R code - """ - if re.match(r'^\s*REBOL\s*\[', text, re.IGNORECASE): - # The code starts with REBOL header - return 1.0 - elif re.search(r'\s*REBOL\s*\[', text, re.IGNORECASE): - # The code contains REBOL header but also some text before it - return 0.5 - - -class RedLexer(RegexLexer): - """ - A `Red-language `_ lexer. - - .. 
versionadded:: 2.0 - """ - name = 'Red' - aliases = ['red', 'red/system'] - filenames = ['*.red', '*.reds'] - mimetypes = ['text/x-red', 'text/x-red-system'] - - flags = re.IGNORECASE | re.MULTILINE - - escape_re = r'(?:\^\([0-9a-f]{1,4}\)*)' - - def word_callback(lexer, match): - word = match.group() - - if re.match(".*:$", word): - yield match.start(), Generic.Subheading, word - elif re.match(r'(if|unless|either|any|all|while|until|loop|repeat|' - r'foreach|forall|func|function|does|has|switch|' - r'case|reduce|compose|get|set|print|prin|equal\?|' - r'not-equal\?|strict-equal\?|lesser\?|greater\?|lesser-or-equal\?|' - r'greater-or-equal\?|same\?|not|type\?|stats|' - r'bind|union|replace|charset|routine)$', word): - yield match.start(), Name.Builtin, word - elif re.match(r'(make|random|reflect|to|form|mold|absolute|add|divide|multiply|negate|' - r'power|remainder|round|subtract|even\?|odd\?|and~|complement|or~|xor~|' - r'append|at|back|change|clear|copy|find|head|head\?|index\?|insert|' - r'length\?|next|pick|poke|remove|reverse|select|sort|skip|swap|tail|tail\?|' - r'take|trim|create|close|delete|modify|open|open\?|query|read|rename|' - r'update|write)$', word): - yield match.start(), Name.Function, word - elif re.match(r'(yes|on|no|off|true|false|tab|cr|lf|newline|escape|slash|sp|space|null|' - r'none|crlf|dot|null-byte)$', word): - yield match.start(), Name.Builtin.Pseudo, word - elif re.match(r'(#system-global|#include|#enum|#define|#either|#if|#import|#export|' - r'#switch|#default|#get-definition)$', word): - yield match.start(), Keyword.Namespace, word - elif re.match(r'(system|halt|quit|quit-return|do|load|q|recycle|call|run|ask|parse|' - r'raise-error|return|exit|break|alias|push|pop|probe|\?\?|spec-of|body-of|' - r'quote|forever)$', word): - yield match.start(), Name.Exception, word - elif re.match(r'(action\?|block\?|char\?|datatype\?|file\?|function\?|get-path\?|zero\?|' - r'get-word\?|integer\?|issue\?|lit-path\?|lit-word\?|logic\?|native\?|' - r'op\?|paren\?|path\?|refinement\?|set-path\?|set-word\?|string\?|unset\?|' - r'any-struct\?|none\?|word\?|any-series\?)$', word): - yield match.start(), Keyword, word - elif re.match(r'(JNICALL|stdcall|cdecl|infix)$', word): - yield match.start(), Keyword.Namespace, word - elif re.match("to-.*", word): - yield match.start(), Keyword, word - elif re.match(r'(\+|-\*\*|-|\*\*|//|/|\*|and|or|xor|=\?|===|==|=|<>|<=|>=|' - r'<<<|>>>|<<|>>|<|>%)$', word): - yield match.start(), Operator, word - elif re.match(r".*\!$", word): - yield match.start(), Keyword.Type, word - elif re.match("'.*", word): - yield match.start(), Name.Variable.Instance, word # lit-word - elif re.match("#.*", word): - yield match.start(), Name.Label, word # issue - elif re.match("%.*", word): - yield match.start(), Name.Decorator, word # file - elif re.match(":.*", word): - yield match.start(), Generic.Subheading, word # get-word - else: - yield match.start(), Name.Variable, word - - tokens = { - 'root': [ - (r'[^R]+', Comment), - (r'Red/System\s+\[', Generic.Strong, 'script'), - (r'Red\s+\[', Generic.Strong, 'script'), - (r'R', Comment) - ], - 'script': [ - (r'\s+', Text), - (r'#"', String.Char, 'char'), - (r'#\{[0-9a-f\s]*\}', Number.Hex), - (r'2#\{', Number.Hex, 'bin2'), - (r'64#\{[0-9a-z+/=\s]*\}', Number.Hex), - (r'([0-9a-f]+)(h)((\s)|(?=[\[\]{}"()]))', - bygroups(Number.Hex, Name.Variable, Whitespace)), - (r'"', String, 'string'), - (r'\{', String, 'string2'), - (r';#+.*\n', Comment.Special), - (r';\*+.*\n', Comment.Preproc), - (r';.*\n', Comment), - (r'%"', 
Name.Decorator, 'stringFile'), - (r'%[^(^{")\s\[\]]+', Name.Decorator), - (r'[+-]?([a-z]{1,3})?\$\d+(\.\d+)?', Number.Float), # money - (r'[+-]?\d+\:\d+(\:\d+)?(\.\d+)?', String.Other), # time - (r'\d+[\-/][0-9a-z]+[\-/]\d+(/\d+:\d+((:\d+)?' - r'([\.\d+]?([+-]?\d+:\d+)?)?)?)?', String.Other), # date - (r'\d+(\.\d+)+\.\d+', Keyword.Constant), # tuple - (r'\d+X\d+', Keyword.Constant), # pair - (r'[+-]?\d+(\'\d+)?([.,]\d*)?E[+-]?\d+', Number.Float), - (r'[+-]?\d+(\'\d+)?[.,]\d*', Number.Float), - (r'[+-]?\d+(\'\d+)?', Number), - (r'[\[\]()]', Generic.Strong), - (r'[a-z]+[^(^{"\s:)]*://[^(^{"\s)]*', Name.Decorator), # url - (r'mailto:[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator), # url - (r'[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator), # email - (r'comment\s"', Comment, 'commentString1'), - (r'comment\s\{', Comment, 'commentString2'), - (r'comment\s\[', Comment, 'commentBlock'), - (r'comment\s[^(\s{"\[]+', Comment), - (r'/[^(^{^")\s/[\]]*', Name.Attribute), - (r'([^(^{^")\s/[\]]+)(?=[:({"\s/\[\]])', word_callback), - (r'<[\w:.-]*>', Name.Tag), - (r'<[^(<>\s")]+', Name.Tag, 'tag'), - (r'([^(^{")\s]+)', Text), - ], - 'string': [ - (r'[^(^")]+', String), - (escape_re, String.Escape), - (r'[(|)]+', String), - (r'\^.', String.Escape), - (r'"', String, '#pop'), - ], - 'string2': [ - (r'[^(^{})]+', String), - (escape_re, String.Escape), - (r'[(|)]+', String), - (r'\^.', String.Escape), - (r'\{', String, '#push'), - (r'\}', String, '#pop'), - ], - 'stringFile': [ - (r'[^(^")]+', Name.Decorator), - (escape_re, Name.Decorator), - (r'\^.', Name.Decorator), - (r'"', Name.Decorator, '#pop'), - ], - 'char': [ - (escape_re + '"', String.Char, '#pop'), - (r'\^."', String.Char, '#pop'), - (r'."', String.Char, '#pop'), - ], - 'tag': [ - (escape_re, Name.Tag), - (r'"', Name.Tag, 'tagString'), - (r'[^(<>\r\n")]+', Name.Tag), - (r'>', Name.Tag, '#pop'), - ], - 'tagString': [ - (r'[^(^")]+', Name.Tag), - (escape_re, Name.Tag), - (r'[(|)]+', Name.Tag), - (r'\^.', Name.Tag), - (r'"', Name.Tag, '#pop'), - ], - 'tuple': [ - (r'(\d+\.)+', Keyword.Constant), - (r'\d+', Keyword.Constant, '#pop'), - ], - 'bin2': [ - (r'\s+', Number.Hex), - (r'([01]\s*){8}', Number.Hex), - (r'\}', Number.Hex, '#pop'), - ], - 'commentString1': [ - (r'[^(^")]+', Comment), - (escape_re, Comment), - (r'[(|)]+', Comment), - (r'\^.', Comment), - (r'"', Comment, '#pop'), - ], - 'commentString2': [ - (r'[^(^{})]+', Comment), - (escape_re, Comment), - (r'[(|)]+', Comment), - (r'\^.', Comment), - (r'\{', Comment, '#push'), - (r'\}', Comment, '#pop'), - ], - 'commentBlock': [ - (r'\[', Comment, '#push'), - (r'\]', Comment, '#pop'), - (r'"', Comment, "commentString1"), - (r'\{', Comment, "commentString2"), - (r'[^(\[\]"{)]+', Comment), - ], - } +# -*- coding: utf-8 -*- +""" + pygments.lexers.rebol + ~~~~~~~~~~~~~~~~~~~~~ + + Lexers for the REBOL and related languages. + + :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, bygroups +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Generic, Whitespace + +__all__ = ['RebolLexer', 'RedLexer'] + + +class RebolLexer(RegexLexer): + """ + A `REBOL `_ lexer. + + .. 
versionadded:: 1.1 + """ + name = 'REBOL' + aliases = ['rebol'] + filenames = ['*.r', '*.r3', '*.reb'] + mimetypes = ['text/x-rebol'] + + flags = re.IGNORECASE | re.MULTILINE + + escape_re = r'(?:\^\([0-9a-f]{1,4}\)*)' + + def word_callback(lexer, match): + word = match.group() + + if re.match(".*:$", word): + yield match.start(), Generic.Subheading, word + elif re.match( + r'(native|alias|all|any|as-string|as-binary|bind|bound\?|case|' + r'catch|checksum|comment|debase|dehex|exclude|difference|disarm|' + r'either|else|enbase|foreach|remove-each|form|free|get|get-env|if|' + r'in|intersect|loop|minimum-of|maximum-of|mold|new-line|' + r'new-line\?|not|now|prin|print|reduce|compose|construct|repeat|' + r'reverse|save|script\?|set|shift|switch|throw|to-hex|trace|try|' + r'type\?|union|unique|unless|unprotect|unset|until|use|value\?|' + r'while|compress|decompress|secure|open|close|read|read-io|' + r'write-io|write|update|query|wait|input\?|exp|log-10|log-2|' + r'log-e|square-root|cosine|sine|tangent|arccosine|arcsine|' + r'arctangent|protect|lowercase|uppercase|entab|detab|connected\?|' + r'browse|launch|stats|get-modes|set-modes|to-local-file|' + r'to-rebol-file|encloak|decloak|create-link|do-browser|bind\?|' + r'hide|draw|show|size-text|textinfo|offset-to-caret|' + r'caret-to-offset|local-request-file|rgb-to-hsv|hsv-to-rgb|' + r'crypt-strength\?|dh-make-key|dh-generate-key|dh-compute-key|' + r'dsa-make-key|dsa-generate-key|dsa-make-signature|' + r'dsa-verify-signature|rsa-make-key|rsa-generate-key|' + r'rsa-encrypt)$', word): + yield match.start(), Name.Builtin, word + elif re.match( + r'(add|subtract|multiply|divide|remainder|power|and~|or~|xor~|' + r'minimum|maximum|negate|complement|absolute|random|head|tail|' + r'next|back|skip|at|pick|first|second|third|fourth|fifth|sixth|' + r'seventh|eighth|ninth|tenth|last|path|find|select|make|to|copy\*|' + r'insert|remove|change|poke|clear|trim|sort|min|max|abs|cp|' + r'copy)$', word): + yield match.start(), Name.Function, word + elif re.match( + r'(error|source|input|license|help|install|echo|Usage|with|func|' + r'throw-on-error|function|does|has|context|probe|\?\?|as-pair|' + r'mod|modulo|round|repend|about|set-net|append|join|rejoin|reform|' + r'remold|charset|array|replace|move|extract|forskip|forall|alter|' + r'first+|also|take|for|forever|dispatch|attempt|what-dir|' + r'change-dir|clean-path|list-dir|dirize|rename|split-path|delete|' + r'make-dir|delete-dir|in-dir|confirm|dump-obj|upgrade|what|' + r'build-tag|process-source|build-markup|decode-cgi|read-cgi|' + r'write-user|save-user|set-user-name|protect-system|parse-xml|' + r'cvs-date|cvs-version|do-boot|get-net-info|desktop|layout|' + r'scroll-para|get-face|alert|set-face|uninstall|unfocus|' + r'request-dir|center-face|do-events|net-error|decode-url|' + r'parse-header|parse-header-date|parse-email-addrs|import-email|' + r'send|build-attach-body|resend|show-popup|hide-popup|open-events|' + r'find-key-face|do-face|viewtop|confine|find-window|' + r'insert-event-func|remove-event-func|inform|dump-pane|dump-face|' + r'flag-face|deflag-face|clear-fields|read-net|vbug|path-thru|' + r'read-thru|load-thru|do-thru|launch-thru|load-image|' + r'request-download|do-face-alt|set-font|set-para|get-style|' + r'set-style|make-face|stylize|choose|hilight-text|hilight-all|' + r'unlight-text|focus|scroll-drag|clear-face|reset-face|scroll-face|' + r'resize-face|load-stock|load-stock-block|notify|request|flash|' + r'request-color|request-pass|request-text|request-list|' + 
r'request-date|request-file|dbug|editor|link-relative-path|' + r'emailer|parse-error)$', word): + yield match.start(), Keyword.Namespace, word + elif re.match( + r'(halt|quit|do|load|q|recycle|call|run|ask|parse|view|unview|' + r'return|exit|break)$', word): + yield match.start(), Name.Exception, word + elif re.match('REBOL$', word): + yield match.start(), Generic.Heading, word + elif re.match("to-.*", word): + yield match.start(), Keyword, word + elif re.match(r'(\+|-|\*|/|//|\*\*|and|or|xor|=\?|=|==|<>|<|>|<=|>=)$', + word): + yield match.start(), Operator, word + elif re.match(r".*\?$", word): + yield match.start(), Keyword, word + elif re.match(r".*\!$", word): + yield match.start(), Keyword.Type, word + elif re.match("'.*", word): + yield match.start(), Name.Variable.Instance, word # lit-word + elif re.match("#.*", word): + yield match.start(), Name.Label, word # issue + elif re.match("%.*", word): + yield match.start(), Name.Decorator, word # file + else: + yield match.start(), Name.Variable, word + + tokens = { + 'root': [ + (r'[^R]+', Comment), + (r'REBOL\s+\[', Generic.Strong, 'script'), + (r'R', Comment) + ], + 'script': [ + (r'\s+', Text), + (r'#"', String.Char, 'char'), + (r'#\{[0-9a-f]*\}', Number.Hex), + (r'2#\{', Number.Hex, 'bin2'), + (r'64#\{[0-9a-z+/=\s]*\}', Number.Hex), + (r'"', String, 'string'), + (r'\{', String, 'string2'), + (r';#+.*\n', Comment.Special), + (r';\*+.*\n', Comment.Preproc), + (r';.*\n', Comment), + (r'%"', Name.Decorator, 'stringFile'), + (r'%[^(^{")\s\[\]]+', Name.Decorator), + (r'[+-]?([a-z]{1,3})?\$\d+(\.\d+)?', Number.Float), # money + (r'[+-]?\d+\:\d+(\:\d+)?(\.\d+)?', String.Other), # time + (r'\d+[\-/][0-9a-z]+[\-/]\d+(\/\d+\:\d+((\:\d+)?' + r'([.\d+]?([+-]?\d+:\d+)?)?)?)?', String.Other), # date + (r'\d+(\.\d+)+\.\d+', Keyword.Constant), # tuple + (r'\d+X\d+', Keyword.Constant), # pair + (r'[+-]?\d+(\'\d+)?([.,]\d*)?E[+-]?\d+', Number.Float), + (r'[+-]?\d+(\'\d+)?[.,]\d*', Number.Float), + (r'[+-]?\d+(\'\d+)?', Number), + (r'[\[\]()]', Generic.Strong), + (r'[a-z]+[^(^{"\s:)]*://[^(^{"\s)]*', Name.Decorator), # url + (r'mailto:[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator), # url + (r'[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator), # email + (r'comment\s"', Comment, 'commentString1'), + (r'comment\s\{', Comment, 'commentString2'), + (r'comment\s\[', Comment, 'commentBlock'), + (r'comment\s[^(\s{"\[]+', Comment), + (r'/[^(^{")\s/[\]]*', Name.Attribute), + (r'([^(^{")\s/[\]]+)(?=[:({"\s/\[\]])', word_callback), + (r'<[\w:.-]*>', Name.Tag), + (r'<[^(<>\s")]+', Name.Tag, 'tag'), + (r'([^(^{")\s]+)', Text), + ], + 'string': [ + (r'[^(^")]+', String), + (escape_re, String.Escape), + (r'[(|)]+', String), + (r'\^.', String.Escape), + (r'"', String, '#pop'), + ], + 'string2': [ + (r'[^(^{})]+', String), + (escape_re, String.Escape), + (r'[(|)]+', String), + (r'\^.', String.Escape), + (r'\{', String, '#push'), + (r'\}', String, '#pop'), + ], + 'stringFile': [ + (r'[^(^")]+', Name.Decorator), + (escape_re, Name.Decorator), + (r'\^.', Name.Decorator), + (r'"', Name.Decorator, '#pop'), + ], + 'char': [ + (escape_re + '"', String.Char, '#pop'), + (r'\^."', String.Char, '#pop'), + (r'."', String.Char, '#pop'), + ], + 'tag': [ + (escape_re, Name.Tag), + (r'"', Name.Tag, 'tagString'), + (r'[^(<>\r\n")]+', Name.Tag), + (r'>', Name.Tag, '#pop'), + ], + 'tagString': [ + (r'[^(^")]+', Name.Tag), + (escape_re, Name.Tag), + (r'[(|)]+', Name.Tag), + (r'\^.', Name.Tag), + (r'"', Name.Tag, '#pop'), + ], + 'tuple': [ + (r'(\d+\.)+', Keyword.Constant), + (r'\d+', 
Keyword.Constant, '#pop'), + ], + 'bin2': [ + (r'\s+', Number.Hex), + (r'([01]\s*){8}', Number.Hex), + (r'\}', Number.Hex, '#pop'), + ], + 'commentString1': [ + (r'[^(^")]+', Comment), + (escape_re, Comment), + (r'[(|)]+', Comment), + (r'\^.', Comment), + (r'"', Comment, '#pop'), + ], + 'commentString2': [ + (r'[^(^{})]+', Comment), + (escape_re, Comment), + (r'[(|)]+', Comment), + (r'\^.', Comment), + (r'\{', Comment, '#push'), + (r'\}', Comment, '#pop'), + ], + 'commentBlock': [ + (r'\[', Comment, '#push'), + (r'\]', Comment, '#pop'), + (r'"', Comment, "commentString1"), + (r'\{', Comment, "commentString2"), + (r'[^(\[\]"{)]+', Comment), + ], + } + + def analyse_text(text): + """ + Check if code contains REBOL header and so it probably not R code + """ + if re.match(r'^\s*REBOL\s*\[', text, re.IGNORECASE): + # The code starts with REBOL header + return 1.0 + elif re.search(r'\s*REBOL\s*\[', text, re.IGNORECASE): + # The code contains REBOL header but also some text before it + return 0.5 + + +class RedLexer(RegexLexer): + """ + A `Red-language `_ lexer. + + .. versionadded:: 2.0 + """ + name = 'Red' + aliases = ['red', 'red/system'] + filenames = ['*.red', '*.reds'] + mimetypes = ['text/x-red', 'text/x-red-system'] + + flags = re.IGNORECASE | re.MULTILINE + + escape_re = r'(?:\^\([0-9a-f]{1,4}\)*)' + + def word_callback(lexer, match): + word = match.group() + + if re.match(".*:$", word): + yield match.start(), Generic.Subheading, word + elif re.match(r'(if|unless|either|any|all|while|until|loop|repeat|' + r'foreach|forall|func|function|does|has|switch|' + r'case|reduce|compose|get|set|print|prin|equal\?|' + r'not-equal\?|strict-equal\?|lesser\?|greater\?|lesser-or-equal\?|' + r'greater-or-equal\?|same\?|not|type\?|stats|' + r'bind|union|replace|charset|routine)$', word): + yield match.start(), Name.Builtin, word + elif re.match(r'(make|random|reflect|to|form|mold|absolute|add|divide|multiply|negate|' + r'power|remainder|round|subtract|even\?|odd\?|and~|complement|or~|xor~|' + r'append|at|back|change|clear|copy|find|head|head\?|index\?|insert|' + r'length\?|next|pick|poke|remove|reverse|select|sort|skip|swap|tail|tail\?|' + r'take|trim|create|close|delete|modify|open|open\?|query|read|rename|' + r'update|write)$', word): + yield match.start(), Name.Function, word + elif re.match(r'(yes|on|no|off|true|false|tab|cr|lf|newline|escape|slash|sp|space|null|' + r'none|crlf|dot|null-byte)$', word): + yield match.start(), Name.Builtin.Pseudo, word + elif re.match(r'(#system-global|#include|#enum|#define|#either|#if|#import|#export|' + r'#switch|#default|#get-definition)$', word): + yield match.start(), Keyword.Namespace, word + elif re.match(r'(system|halt|quit|quit-return|do|load|q|recycle|call|run|ask|parse|' + r'raise-error|return|exit|break|alias|push|pop|probe|\?\?|spec-of|body-of|' + r'quote|forever)$', word): + yield match.start(), Name.Exception, word + elif re.match(r'(action\?|block\?|char\?|datatype\?|file\?|function\?|get-path\?|zero\?|' + r'get-word\?|integer\?|issue\?|lit-path\?|lit-word\?|logic\?|native\?|' + r'op\?|paren\?|path\?|refinement\?|set-path\?|set-word\?|string\?|unset\?|' + r'any-struct\?|none\?|word\?|any-series\?)$', word): + yield match.start(), Keyword, word + elif re.match(r'(JNICALL|stdcall|cdecl|infix)$', word): + yield match.start(), Keyword.Namespace, word + elif re.match("to-.*", word): + yield match.start(), Keyword, word + elif re.match(r'(\+|-\*\*|-|\*\*|//|/|\*|and|or|xor|=\?|===|==|=|<>|<=|>=|' + r'<<<|>>>|<<|>>|<|>%)$', word): + yield match.start(), 
Operator, word + elif re.match(r".*\!$", word): + yield match.start(), Keyword.Type, word + elif re.match("'.*", word): + yield match.start(), Name.Variable.Instance, word # lit-word + elif re.match("#.*", word): + yield match.start(), Name.Label, word # issue + elif re.match("%.*", word): + yield match.start(), Name.Decorator, word # file + elif re.match(":.*", word): + yield match.start(), Generic.Subheading, word # get-word + else: + yield match.start(), Name.Variable, word + + tokens = { + 'root': [ + (r'[^R]+', Comment), + (r'Red/System\s+\[', Generic.Strong, 'script'), + (r'Red\s+\[', Generic.Strong, 'script'), + (r'R', Comment) + ], + 'script': [ + (r'\s+', Text), + (r'#"', String.Char, 'char'), + (r'#\{[0-9a-f\s]*\}', Number.Hex), + (r'2#\{', Number.Hex, 'bin2'), + (r'64#\{[0-9a-z+/=\s]*\}', Number.Hex), + (r'([0-9a-f]+)(h)((\s)|(?=[\[\]{}"()]))', + bygroups(Number.Hex, Name.Variable, Whitespace)), + (r'"', String, 'string'), + (r'\{', String, 'string2'), + (r';#+.*\n', Comment.Special), + (r';\*+.*\n', Comment.Preproc), + (r';.*\n', Comment), + (r'%"', Name.Decorator, 'stringFile'), + (r'%[^(^{")\s\[\]]+', Name.Decorator), + (r'[+-]?([a-z]{1,3})?\$\d+(\.\d+)?', Number.Float), # money + (r'[+-]?\d+\:\d+(\:\d+)?(\.\d+)?', String.Other), # time + (r'\d+[\-/][0-9a-z]+[\-/]\d+(/\d+:\d+((:\d+)?' + r'([\.\d+]?([+-]?\d+:\d+)?)?)?)?', String.Other), # date + (r'\d+(\.\d+)+\.\d+', Keyword.Constant), # tuple + (r'\d+X\d+', Keyword.Constant), # pair + (r'[+-]?\d+(\'\d+)?([.,]\d*)?E[+-]?\d+', Number.Float), + (r'[+-]?\d+(\'\d+)?[.,]\d*', Number.Float), + (r'[+-]?\d+(\'\d+)?', Number), + (r'[\[\]()]', Generic.Strong), + (r'[a-z]+[^(^{"\s:)]*://[^(^{"\s)]*', Name.Decorator), # url + (r'mailto:[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator), # url + (r'[^(^{"@\s)]+@[^(^{"@\s)]+', Name.Decorator), # email + (r'comment\s"', Comment, 'commentString1'), + (r'comment\s\{', Comment, 'commentString2'), + (r'comment\s\[', Comment, 'commentBlock'), + (r'comment\s[^(\s{"\[]+', Comment), + (r'/[^(^{^")\s/[\]]*', Name.Attribute), + (r'([^(^{^")\s/[\]]+)(?=[:({"\s/\[\]])', word_callback), + (r'<[\w:.-]*>', Name.Tag), + (r'<[^(<>\s")]+', Name.Tag, 'tag'), + (r'([^(^{")\s]+)', Text), + ], + 'string': [ + (r'[^(^")]+', String), + (escape_re, String.Escape), + (r'[(|)]+', String), + (r'\^.', String.Escape), + (r'"', String, '#pop'), + ], + 'string2': [ + (r'[^(^{})]+', String), + (escape_re, String.Escape), + (r'[(|)]+', String), + (r'\^.', String.Escape), + (r'\{', String, '#push'), + (r'\}', String, '#pop'), + ], + 'stringFile': [ + (r'[^(^")]+', Name.Decorator), + (escape_re, Name.Decorator), + (r'\^.', Name.Decorator), + (r'"', Name.Decorator, '#pop'), + ], + 'char': [ + (escape_re + '"', String.Char, '#pop'), + (r'\^."', String.Char, '#pop'), + (r'."', String.Char, '#pop'), + ], + 'tag': [ + (escape_re, Name.Tag), + (r'"', Name.Tag, 'tagString'), + (r'[^(<>\r\n")]+', Name.Tag), + (r'>', Name.Tag, '#pop'), + ], + 'tagString': [ + (r'[^(^")]+', Name.Tag), + (escape_re, Name.Tag), + (r'[(|)]+', Name.Tag), + (r'\^.', Name.Tag), + (r'"', Name.Tag, '#pop'), + ], + 'tuple': [ + (r'(\d+\.)+', Keyword.Constant), + (r'\d+', Keyword.Constant, '#pop'), + ], + 'bin2': [ + (r'\s+', Number.Hex), + (r'([01]\s*){8}', Number.Hex), + (r'\}', Number.Hex, '#pop'), + ], + 'commentString1': [ + (r'[^(^")]+', Comment), + (escape_re, Comment), + (r'[(|)]+', Comment), + (r'\^.', Comment), + (r'"', Comment, '#pop'), + ], + 'commentString2': [ + (r'[^(^{})]+', Comment), + (escape_re, Comment), + (r'[(|)]+', Comment), + (r'\^.', Comment), + 
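# braces nest: the '#push'/'#pop' pair below keeps {a {b} c} one comment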
(r'\{', Comment, '#push'), + (r'\}', Comment, '#pop'), + ], + 'commentBlock': [ + (r'\[', Comment, '#push'), + (r'\]', Comment, '#pop'), + (r'"', Comment, "commentString1"), + (r'\{', Comment, "commentString2"), + (r'[^(\[\]"{)]+', Comment), + ], + } diff --git a/pygments/lexers/resource.py b/pygments/lexers/resource.py old mode 100644 new mode 100755 index ccd4e5f..2e3fc0e --- a/pygments/lexers/resource.py +++ b/pygments/lexers/resource.py @@ -1,85 +1,85 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.resource - ~~~~~~~~~~~~~~~~~~~~~~~~ - - Lexer for resource definition files. - - :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -import re - -from pygments.lexer import RegexLexer, bygroups, words -from pygments.token import Comment, String, Number, Operator, Text, \ - Keyword, Name - -__all__ = ['ResourceLexer'] - - -class ResourceLexer(RegexLexer): - """Lexer for `ICU Resource bundles - `_. - - .. versionadded:: 2.0 - """ - name = 'ResourceBundle' - aliases = ['resource', 'resourcebundle'] - filenames = [] - - _types = (':table', ':array', ':string', ':bin', ':import', ':intvector', - ':int', ':alias') - - flags = re.MULTILINE | re.IGNORECASE - tokens = { - 'root': [ - (r'//.*?$', Comment), - (r'"', String, 'string'), - (r'-?\d+', Number.Integer), - (r'[,{}]', Operator), - (r'([^\s{:]+)(\s*)(%s?)' % '|'.join(_types), - bygroups(Name, Text, Keyword)), - (r'\s+', Text), - (words(_types), Keyword), - ], - 'string': [ - (r'(\\x[0-9a-f]{2}|\\u[0-9a-f]{4}|\\U00[0-9a-f]{6}|' - r'\\[0-7]{1,3}|\\c.|\\[abtnvfre\'"?\\]|\\\{|[^"{\\])+', String), - (r'\{', String.Escape, 'msgname'), - (r'"', String, '#pop') - ], - 'msgname': [ - (r'([^{},]+)(\s*)', bygroups(Name, String.Escape), ('#pop', 'message')) - ], - 'message': [ - (r'\{', String.Escape, 'msgname'), - (r'\}', String.Escape, '#pop'), - (r'(,)(\s*)([a-z]+)(\s*\})', - bygroups(Operator, String.Escape, Keyword, String.Escape), '#pop'), - (r'(,)(\s*)([a-z]+)(\s*)(,)(\s*)(offset)(\s*)(:)(\s*)(-?\d+)(\s*)', - bygroups(Operator, String.Escape, Keyword, String.Escape, Operator, - String.Escape, Operator.Word, String.Escape, Operator, - String.Escape, Number.Integer, String.Escape), 'choice'), - (r'(,)(\s*)([a-z]+)(\s*)(,)(\s*)', - bygroups(Operator, String.Escape, Keyword, String.Escape, Operator, - String.Escape), 'choice'), - (r'\s+', String.Escape) - ], - 'choice': [ - (r'(=|<|>|<=|>=|!=)(-?\d+)(\s*\{)', - bygroups(Operator, Number.Integer, String.Escape), 'message'), - (r'([a-z]+)(\s*\{)', bygroups(Keyword.Type, String.Escape), 'str'), - (r'\}', String.Escape, ('#pop', '#pop')), - (r'\s+', String.Escape) - ], - 'str': [ - (r'\}', String.Escape, '#pop'), - (r'\{', String.Escape, 'msgname'), - (r'[^{}]+', String) - ] - } - - def analyse_text(text): - if text.startswith('root:table'): - return 1.0 +# -*- coding: utf-8 -*- +""" + pygments.lexers.resource + ~~~~~~~~~~~~~~~~~~~~~~~~ + + Lexer for resource definition files. + + :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, bygroups, words +from pygments.token import Comment, String, Number, Operator, Text, \ + Keyword, Name + +__all__ = ['ResourceLexer'] + + +class ResourceLexer(RegexLexer): + """Lexer for `ICU Resource bundles + `_. + + .. 
versionadded:: 2.0 + """ + name = 'ResourceBundle' + aliases = ['resource', 'resourcebundle'] + filenames = [] + + _types = (':table', ':array', ':string', ':bin', ':import', ':intvector', + ':int', ':alias') + + flags = re.MULTILINE | re.IGNORECASE + tokens = { + 'root': [ + (r'//.*?$', Comment), + (r'"', String, 'string'), + (r'-?\d+', Number.Integer), + (r'[,{}]', Operator), + (r'([^\s{:]+)(\s*)(%s?)' % '|'.join(_types), + bygroups(Name, Text, Keyword)), + (r'\s+', Text), + (words(_types), Keyword), + ], + 'string': [ + (r'(\\x[0-9a-f]{2}|\\u[0-9a-f]{4}|\\U00[0-9a-f]{6}|' + r'\\[0-7]{1,3}|\\c.|\\[abtnvfre\'"?\\]|\\\{|[^"{\\])+', String), + (r'\{', String.Escape, 'msgname'), + (r'"', String, '#pop') + ], + 'msgname': [ + (r'([^{},]+)(\s*)', bygroups(Name, String.Escape), ('#pop', 'message')) + ], + 'message': [ + (r'\{', String.Escape, 'msgname'), + (r'\}', String.Escape, '#pop'), + (r'(,)(\s*)([a-z]+)(\s*\})', + bygroups(Operator, String.Escape, Keyword, String.Escape), '#pop'), + (r'(,)(\s*)([a-z]+)(\s*)(,)(\s*)(offset)(\s*)(:)(\s*)(-?\d+)(\s*)', + bygroups(Operator, String.Escape, Keyword, String.Escape, Operator, + String.Escape, Operator.Word, String.Escape, Operator, + String.Escape, Number.Integer, String.Escape), 'choice'), + (r'(,)(\s*)([a-z]+)(\s*)(,)(\s*)', + bygroups(Operator, String.Escape, Keyword, String.Escape, Operator, + String.Escape), 'choice'), + (r'\s+', String.Escape) + ], + 'choice': [ + (r'(=|<|>|<=|>=|!=)(-?\d+)(\s*\{)', + bygroups(Operator, Number.Integer, String.Escape), 'message'), + (r'([a-z]+)(\s*\{)', bygroups(Keyword.Type, String.Escape), 'str'), + (r'\}', String.Escape, ('#pop', '#pop')), + (r'\s+', String.Escape) + ], + 'str': [ + (r'\}', String.Escape, '#pop'), + (r'\{', String.Escape, 'msgname'), + (r'[^{}]+', String) + ] + } + + def analyse_text(text): + if text.startswith('root:table'): + return 1.0 diff --git a/pygments/lexers/ride.py b/pygments/lexers/ride.py old mode 100644 new mode 100755 index 17bc246..205ea5c --- a/pygments/lexers/ride.py +++ b/pygments/lexers/ride.py @@ -1,139 +1,139 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.ride - ~~~~~~~~~~~~~~~~~~~~ - - Lexer for the Ride programming language. - - :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -from pygments.lexer import RegexLexer, words, include -from pygments.token import Comment, Keyword, Name, Number, Punctuation, String, Text - -__all__ = ['RideLexer'] - - -class RideLexer(RegexLexer): - """ - For `Ride `_ - source code. - - .. 
versionadded:: 2.6 - """ - - name = 'Ride' - aliases = ['ride'] - filenames = ['*.ride'] - mimetypes = ['text/x-ride'] - - validName = r'[a-zA-Z_][a-zA-Z0-9_\']*' - - builtinOps = ( - '||', '|', '>=', '>', '==', '!', - '=', '<=', '<', '::', ':+', ':', '!=', '/', - '.', '=>', '-', '+', '*', '&&', '%', '++', - ) - - globalVariablesName = ( - 'NOALG', 'MD5', 'SHA1', 'SHA224', 'SHA256', 'SHA384', 'SHA512', - 'SHA3224', 'SHA3256', 'SHA3384', 'SHA3512', 'nil', 'this', 'unit', - 'height', 'lastBlock', 'Buy', 'Sell', 'CEILING', 'FLOOR', 'DOWN', - 'HALFDOWN', 'HALFEVEN', 'HALFUP', 'UP', - ) - - typesName = ( - 'Unit', 'Int', 'Boolean', 'ByteVector', 'String', 'Address', 'Alias', - 'Transfer', 'AssetPair', 'DataEntry', 'Order', 'Transaction', - 'GenesisTransaction', 'PaymentTransaction', 'ReissueTransaction', - 'BurnTransaction', 'MassTransferTransaction', 'ExchangeTransaction', - 'TransferTransaction', 'SetAssetScriptTransaction', - 'InvokeScriptTransaction', 'IssueTransaction', 'LeaseTransaction', - 'LeaseCancelTransaction', 'CreateAliasTransaction', - 'SetScriptTransaction', 'SponsorFeeTransaction', 'DataTransaction', - 'WriteSet', 'AttachedPayment', 'ScriptTransfer', 'TransferSet', - 'ScriptResult', 'Invocation', 'Asset', 'BlockInfo', 'Issue', 'Reissue', - 'Burn', 'NoAlg', 'Md5', 'Sha1', 'Sha224', 'Sha256', 'Sha384', 'Sha512', - 'Sha3224', 'Sha3256', 'Sha3384', 'Sha3512', 'BinaryEntry', - 'BooleanEntry', 'IntegerEntry', 'StringEntry', 'List', 'Ceiling', - 'Down', 'Floor', 'HalfDown', 'HalfEven', 'HalfUp', 'Up', - ) - - functionsName = ( - 'fraction', 'size', 'toBytes', 'take', 'drop', 'takeRight', 'dropRight', - 'toString', 'isDefined', 'extract', 'throw', 'getElement', 'value', - 'cons', 'toUtf8String', 'toInt', 'indexOf', 'lastIndexOf', 'split', - 'parseInt', 'parseIntValue', 'keccak256', 'blake2b256', 'sha256', - 'sigVerify', 'toBase58String', 'fromBase58String', 'toBase64String', - 'fromBase64String', 'transactionById', 'transactionHeightById', - 'getInteger', 'getBoolean', 'getBinary', 'getString', - 'addressFromPublicKey', 'addressFromString', 'addressFromRecipient', - 'assetBalance', 'wavesBalance', 'getIntegerValue', 'getBooleanValue', - 'getBinaryValue', 'getStringValue', 'addressFromStringValue', - 'assetInfo', 'rsaVerify', 'checkMerkleProof', 'median', - 'valueOrElse', 'valueOrErrorMessage', 'contains', 'log', 'pow', - 'toBase16String', 'fromBase16String', 'blockInfoByHeight', - 'transferTransactionById', - ) - - reservedWords = words(( - 'match', 'case', 'else', 'func', 'if', - 'let', 'then', '@Callable', '@Verifier', - ), suffix=r'\b') - - tokens = { - 'root': [ - # Comments - (r'#.*', Comment.Single), - # Whitespace - (r'\s+', Text), - # Strings - (r'"', String, 'doublequote'), - (r'utf8\'', String, 'utf8quote'), - (r'base(58|64|16)\'', String, 'singlequote'), - # Keywords - (reservedWords, Keyword.Reserved), - (r'\{-#.*?#-\}', Keyword.Reserved), - (r'FOLD<\d+>', Keyword.Reserved), - # Types - (words(typesName), Keyword.Type), - # Main - # (specialName, Keyword.Reserved), - # Prefix Operators - (words(builtinOps, prefix=r'\(', suffix=r'\)'), Name.Function), - # Infix Operators - (words(builtinOps), Name.Function), - (words(globalVariablesName), Name.Function), - (words(functionsName), Name.Function), - # Numbers - include('numbers'), - # Variable Names - (validName, Name.Variable), - # Parens - (r'[,()\[\]{}]', Punctuation), - ], - - 'doublequote': [ - (r'\\u[0-9a-fA-F]{4}', String.Escape), - (r'\\[nrfvb\\"]', String.Escape), - (r'[^"]', String), - (r'"', String, '#pop'), - ], - - 
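-        # utf8'...' literals reuse the same \uXXXX and backslash escapes as doublequote strings, but terminate on a single quote.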
'utf8quote': [ - (r'\\u[0-9a-fA-F]{4}', String.Escape), - (r'\\[nrfvb\\\']', String.Escape), - (r'[^\']', String), - (r'\'', String, '#pop'), - ], - - 'singlequote': [ - (r'[^\']', String), - (r'\'', String, '#pop'), - ], - - 'numbers': [ - (r'_?\d+', Number.Integer), - ], - } +# -*- coding: utf-8 -*- +""" + pygments.lexers.ride + ~~~~~~~~~~~~~~~~~~~~ + + Lexer for the Ride programming language. + + :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer, words, include +from pygments.token import Comment, Keyword, Name, Number, Punctuation, String, Text + +__all__ = ['RideLexer'] + + +class RideLexer(RegexLexer): + """ + For `Ride `_ + source code. + + .. versionadded:: 2.6 + """ + + name = 'Ride' + aliases = ['ride'] + filenames = ['*.ride'] + mimetypes = ['text/x-ride'] + + validName = r'[a-zA-Z_][a-zA-Z0-9_\']*' + + builtinOps = ( + '||', '|', '>=', '>', '==', '!', + '=', '<=', '<', '::', ':+', ':', '!=', '/', + '.', '=>', '-', '+', '*', '&&', '%', '++', + ) + + globalVariablesName = ( + 'NOALG', 'MD5', 'SHA1', 'SHA224', 'SHA256', 'SHA384', 'SHA512', + 'SHA3224', 'SHA3256', 'SHA3384', 'SHA3512', 'nil', 'this', 'unit', + 'height', 'lastBlock', 'Buy', 'Sell', 'CEILING', 'FLOOR', 'DOWN', + 'HALFDOWN', 'HALFEVEN', 'HALFUP', 'UP', + ) + + typesName = ( + 'Unit', 'Int', 'Boolean', 'ByteVector', 'String', 'Address', 'Alias', + 'Transfer', 'AssetPair', 'DataEntry', 'Order', 'Transaction', + 'GenesisTransaction', 'PaymentTransaction', 'ReissueTransaction', + 'BurnTransaction', 'MassTransferTransaction', 'ExchangeTransaction', + 'TransferTransaction', 'SetAssetScriptTransaction', + 'InvokeScriptTransaction', 'IssueTransaction', 'LeaseTransaction', + 'LeaseCancelTransaction', 'CreateAliasTransaction', + 'SetScriptTransaction', 'SponsorFeeTransaction', 'DataTransaction', + 'WriteSet', 'AttachedPayment', 'ScriptTransfer', 'TransferSet', + 'ScriptResult', 'Invocation', 'Asset', 'BlockInfo', 'Issue', 'Reissue', + 'Burn', 'NoAlg', 'Md5', 'Sha1', 'Sha224', 'Sha256', 'Sha384', 'Sha512', + 'Sha3224', 'Sha3256', 'Sha3384', 'Sha3512', 'BinaryEntry', + 'BooleanEntry', 'IntegerEntry', 'StringEntry', 'List', 'Ceiling', + 'Down', 'Floor', 'HalfDown', 'HalfEven', 'HalfUp', 'Up', + ) + + functionsName = ( + 'fraction', 'size', 'toBytes', 'take', 'drop', 'takeRight', 'dropRight', + 'toString', 'isDefined', 'extract', 'throw', 'getElement', 'value', + 'cons', 'toUtf8String', 'toInt', 'indexOf', 'lastIndexOf', 'split', + 'parseInt', 'parseIntValue', 'keccak256', 'blake2b256', 'sha256', + 'sigVerify', 'toBase58String', 'fromBase58String', 'toBase64String', + 'fromBase64String', 'transactionById', 'transactionHeightById', + 'getInteger', 'getBoolean', 'getBinary', 'getString', + 'addressFromPublicKey', 'addressFromString', 'addressFromRecipient', + 'assetBalance', 'wavesBalance', 'getIntegerValue', 'getBooleanValue', + 'getBinaryValue', 'getStringValue', 'addressFromStringValue', + 'assetInfo', 'rsaVerify', 'checkMerkleProof', 'median', + 'valueOrElse', 'valueOrErrorMessage', 'contains', 'log', 'pow', + 'toBase16String', 'fromBase16String', 'blockInfoByHeight', + 'transferTransactionById', + ) + + reservedWords = words(( + 'match', 'case', 'else', 'func', 'if', + 'let', 'then', '@Callable', '@Verifier', + ), suffix=r'\b') + + tokens = { + 'root': [ + # Comments + (r'#.*', Comment.Single), + # Whitespace + (r'\s+', Text), + # Strings + (r'"', String, 'doublequote'), + (r'utf8\'', String, 'utf8quote'), + 
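+            # base58' / base64' / base16' byte-vector literals take no escapes, so they use the bare 'singlequote' state.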
(r'base(58|64|16)\'', String, 'singlequote'), + # Keywords + (reservedWords, Keyword.Reserved), + (r'\{-#.*?#-\}', Keyword.Reserved), + (r'FOLD<\d+>', Keyword.Reserved), + # Types + (words(typesName), Keyword.Type), + # Main + # (specialName, Keyword.Reserved), + # Prefix Operators + (words(builtinOps, prefix=r'\(', suffix=r'\)'), Name.Function), + # Infix Operators + (words(builtinOps), Name.Function), + (words(globalVariablesName), Name.Function), + (words(functionsName), Name.Function), + # Numbers + include('numbers'), + # Variable Names + (validName, Name.Variable), + # Parens + (r'[,()\[\]{}]', Punctuation), + ], + + 'doublequote': [ + (r'\\u[0-9a-fA-F]{4}', String.Escape), + (r'\\[nrfvb\\"]', String.Escape), + (r'[^"]', String), + (r'"', String, '#pop'), + ], + + 'utf8quote': [ + (r'\\u[0-9a-fA-F]{4}', String.Escape), + (r'\\[nrfvb\\\']', String.Escape), + (r'[^\']', String), + (r'\'', String, '#pop'), + ], + + 'singlequote': [ + (r'[^\']', String), + (r'\'', String, '#pop'), + ], + + 'numbers': [ + (r'_?\d+', Number.Integer), + ], + } diff --git a/pygments/lexers/rnc.py b/pygments/lexers/rnc.py old mode 100644 new mode 100755 index 8f0ba5c..baed881 --- a/pygments/lexers/rnc.py +++ b/pygments/lexers/rnc.py @@ -1,67 +1,67 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.rnc - ~~~~~~~~~~~~~~~~~~~ - - Lexer for Relax-NG Compact syntax - - :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -from pygments.lexer import RegexLexer -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ - Punctuation - -__all__ = ['RNCCompactLexer'] - - -class RNCCompactLexer(RegexLexer): - """ - For `RelaxNG-compact `_ syntax. - - .. versionadded:: 2.2 - """ - - name = 'Relax-NG Compact' - aliases = ['rnc', 'rng-compact'] - filenames = ['*.rnc'] - - tokens = { - 'root': [ - (r'namespace\b', Keyword.Namespace), - (r'(?:default|datatypes)\b', Keyword.Declaration), - (r'##.*$', Comment.Preproc), - (r'#.*$', Comment.Single), - (r'"[^"]*"', String.Double), - # TODO single quoted strings and escape sequences outside of - # double-quoted strings - (r'(?:element|attribute|mixed)\b', Keyword.Declaration, 'variable'), - (r'(text\b|xsd:[^ ]+)', Keyword.Type, 'maybe_xsdattributes'), - (r'[,?&*=|~]|>>', Operator), - (r'[(){}]', Punctuation), - (r'.', Text), - ], - - # a variable has been declared using `element` or `attribute` - 'variable': [ - (r'[^{]+', Name.Variable), - (r'\{', Punctuation, '#pop'), - ], - - # after an xsd: declaration there may be attributes - 'maybe_xsdattributes': [ - (r'\{', Punctuation, 'xsdattributes'), - (r'\}', Punctuation, '#pop'), - (r'.', Text), - ], - - # attributes take the form { key1 = value1 key2 = value2 ... } - 'xsdattributes': [ - (r'[^ =}]', Name.Attribute), - (r'=', Operator), - (r'"[^"]*"', String.Double), - (r'\}', Punctuation, '#pop'), - (r'.', Text), - ], - } +# -*- coding: utf-8 -*- +""" + pygments.lexers.rnc + ~~~~~~~~~~~~~~~~~~~ + + Lexer for Relax-NG Compact syntax + + :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Punctuation + +__all__ = ['RNCCompactLexer'] + + +class RNCCompactLexer(RegexLexer): + """ + For `RelaxNG-compact `_ syntax. + + .. 
versionadded:: 2.2 + """ + + name = 'Relax-NG Compact' + aliases = ['rnc', 'rng-compact'] + filenames = ['*.rnc'] + + tokens = { + 'root': [ + (r'namespace\b', Keyword.Namespace), + (r'(?:default|datatypes)\b', Keyword.Declaration), + (r'##.*$', Comment.Preproc), + (r'#.*$', Comment.Single), + (r'"[^"]*"', String.Double), + # TODO single quoted strings and escape sequences outside of + # double-quoted strings + (r'(?:element|attribute|mixed)\b', Keyword.Declaration, 'variable'), + (r'(text\b|xsd:[^ ]+)', Keyword.Type, 'maybe_xsdattributes'), + (r'[,?&*=|~]|>>', Operator), + (r'[(){}]', Punctuation), + (r'.', Text), + ], + + # a variable has been declared using `element` or `attribute` + 'variable': [ + (r'[^{]+', Name.Variable), + (r'\{', Punctuation, '#pop'), + ], + + # after an xsd: declaration there may be attributes + 'maybe_xsdattributes': [ + (r'\{', Punctuation, 'xsdattributes'), + (r'\}', Punctuation, '#pop'), + (r'.', Text), + ], + + # attributes take the form { key1 = value1 key2 = value2 ... } + 'xsdattributes': [ + (r'[^ =}]', Name.Attribute), + (r'=', Operator), + (r'"[^"]*"', String.Double), + (r'\}', Punctuation, '#pop'), + (r'.', Text), + ], + } diff --git a/pygments/lexers/roboconf.py b/pygments/lexers/roboconf.py old mode 100644 new mode 100755 index f820fe1..12cda47 --- a/pygments/lexers/roboconf.py +++ b/pygments/lexers/roboconf.py @@ -1,82 +1,82 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.roboconf - ~~~~~~~~~~~~~~~~~~~~~~~~ - - Lexers for Roboconf DSL. - - :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -from pygments.lexer import RegexLexer, words, re -from pygments.token import Text, Operator, Keyword, Name, Comment - -__all__ = ['RoboconfGraphLexer', 'RoboconfInstancesLexer'] - - -class RoboconfGraphLexer(RegexLexer): - """ - Lexer for `Roboconf `_ graph files. - - .. versionadded:: 2.1 - """ - name = 'Roboconf Graph' - aliases = ['roboconf-graph'] - filenames = ['*.graph'] - - flags = re.IGNORECASE | re.MULTILINE - tokens = { - 'root': [ - # Skip white spaces - (r'\s+', Text), - - # There is one operator - (r'=', Operator), - - # Keywords - (words(('facet', 'import'), suffix=r'\s*\b', prefix=r'\b'), Keyword), - (words(( - 'installer', 'extends', 'exports', 'imports', 'facets', - 'children'), suffix=r'\s*:?', prefix=r'\b'), Name), - - # Comments - (r'#.*\n', Comment), - - # Default - (r'[^#]', Text), - (r'.*\n', Text) - ] - } - - -class RoboconfInstancesLexer(RegexLexer): - """ - Lexer for `Roboconf `_ instances files. - - .. versionadded:: 2.1 - """ - name = 'Roboconf Instances' - aliases = ['roboconf-instances'] - filenames = ['*.instances'] - - flags = re.IGNORECASE | re.MULTILINE - tokens = { - 'root': [ - - # Skip white spaces - (r'\s+', Text), - - # Keywords - (words(('instance of', 'import'), suffix=r'\s*\b', prefix=r'\b'), Keyword), - (words(('name', 'count'), suffix=r's*:?', prefix=r'\b'), Name), - (r'\s*[\w.-]+\s*:', Name), - - # Comments - (r'#.*\n', Comment), - - # Default - (r'[^#]', Text), - (r'.*\n', Text) - ] - } +# -*- coding: utf-8 -*- +""" + pygments.lexers.roboconf + ~~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for Roboconf DSL. + + :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +from pygments.lexer import RegexLexer, words, re +from pygments.token import Text, Operator, Keyword, Name, Comment + +__all__ = ['RoboconfGraphLexer', 'RoboconfInstancesLexer'] + + +class RoboconfGraphLexer(RegexLexer): + """ + Lexer for `Roboconf `_ graph files. + + .. versionadded:: 2.1 + """ + name = 'Roboconf Graph' + aliases = ['roboconf-graph'] + filenames = ['*.graph'] + + flags = re.IGNORECASE | re.MULTILINE + tokens = { + 'root': [ + # Skip white spaces + (r'\s+', Text), + + # There is one operator + (r'=', Operator), + + # Keywords + (words(('facet', 'import'), suffix=r'\s*\b', prefix=r'\b'), Keyword), + (words(( + 'installer', 'extends', 'exports', 'imports', 'facets', + 'children'), suffix=r'\s*:?', prefix=r'\b'), Name), + + # Comments + (r'#.*\n', Comment), + + # Default + (r'[^#]', Text), + (r'.*\n', Text) + ] + } + + +class RoboconfInstancesLexer(RegexLexer): + """ + Lexer for `Roboconf `_ instances files. + + .. versionadded:: 2.1 + """ + name = 'Roboconf Instances' + aliases = ['roboconf-instances'] + filenames = ['*.instances'] + + flags = re.IGNORECASE | re.MULTILINE + tokens = { + 'root': [ + + # Skip white spaces + (r'\s+', Text), + + # Keywords + (words(('instance of', 'import'), suffix=r'\s*\b', prefix=r'\b'), Keyword), + (words(('name', 'count'), suffix=r's*:?', prefix=r'\b'), Name), + (r'\s*[\w.-]+\s*:', Name), + + # Comments + (r'#.*\n', Comment), + + # Default + (r'[^#]', Text), + (r'.*\n', Text) + ] + } diff --git a/pygments/lexers/robotframework.py b/pygments/lexers/robotframework.py old mode 100644 new mode 100755 index ddaddb2..c605996 --- a/pygments/lexers/robotframework.py +++ b/pygments/lexers/robotframework.py @@ -1,559 +1,552 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.robotframework - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - Lexer for Robot Framework. - - :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -# Copyright 2012 Nokia Siemens Networks Oyj -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import re - -from pygments.lexer import Lexer -from pygments.token import Token - -__all__ = ['RobotFrameworkLexer'] - - -HEADING = Token.Generic.Heading -SETTING = Token.Keyword.Namespace -IMPORT = Token.Name.Namespace -TC_KW_NAME = Token.Generic.Subheading -KEYWORD = Token.Name.Function -ARGUMENT = Token.String -VARIABLE = Token.Name.Variable -COMMENT = Token.Comment -SEPARATOR = Token.Punctuation -SYNTAX = Token.Punctuation -GHERKIN = Token.Generic.Emph -ERROR = Token.Error - - -def normalize(string, remove=''): - string = string.lower() - for char in remove + ' ': - if char in string: - string = string.replace(char, '') - return string - - -class RobotFrameworkLexer(Lexer): - """ - For `Robot Framework `_ test data. - - Supports both space and pipe separated plain text formats. - - .. 
versionadded:: 1.6 - """ - name = 'RobotFramework' - aliases = ['robotframework'] - filenames = ['*.robot'] - mimetypes = ['text/x-robotframework'] - - def __init__(self, **options): - options['tabsize'] = 2 - options['encoding'] = 'UTF-8' - Lexer.__init__(self, **options) - - def get_tokens_unprocessed(self, text): - row_tokenizer = RowTokenizer() - var_tokenizer = VariableTokenizer() - index = 0 - for row in text.splitlines(): - for value, token in row_tokenizer.tokenize(row): - for value, token in var_tokenizer.tokenize(value, token): - if value: - yield index, token, str(value) - index += len(value) - - -class VariableTokenizer: - - def tokenize(self, string, token): - var = VariableSplitter(string, identifiers='$@%&') - if var.start < 0 or token in (COMMENT, ERROR): - yield string, token - return - for value, token in self._tokenize(var, string, token): - if value: - yield value, token - - def _tokenize(self, var, string, orig_token): - before = string[:var.start] - yield before, orig_token - yield var.identifier + '{', SYNTAX - for value, token in self.tokenize(var.base, VARIABLE): - yield value, token - yield '}', SYNTAX - if var.index: - yield '[', SYNTAX - for value, token in self.tokenize(var.index, VARIABLE): - yield value, token - yield ']', SYNTAX - for value, token in self.tokenize(string[var.end:], orig_token): - yield value, token - - -class RowTokenizer: - - def __init__(self): - self._table = UnknownTable() - self._splitter = RowSplitter() - testcases = TestCaseTable() - settings = SettingTable(testcases.set_default_template) - variables = VariableTable() - keywords = KeywordTable() - self._tables = {'settings': settings, 'setting': settings, - 'metadata': settings, - 'variables': variables, 'variable': variables, - 'testcases': testcases, 'testcase': testcases, - 'keywords': keywords, 'keyword': keywords, - 'userkeywords': keywords, 'userkeyword': keywords} - - def tokenize(self, row): - commented = False - heading = False - for index, value in enumerate(self._splitter.split(row)): - # First value, and every second after that, is a separator. 
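-            # divmod(index-1, 2) turns the alternating value/separator stream into a cell index and a separator flag.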
- index, separator = divmod(index-1, 2) - if value.startswith('#'): - commented = True - elif index == 0 and value.startswith('*'): - self._table = self._start_table(value) - heading = True - for value, token in self._tokenize(value, index, commented, - separator, heading): - yield value, token - self._table.end_row() - - def _start_table(self, header): - name = normalize(header, remove='*') - return self._tables.get(name, UnknownTable()) - - def _tokenize(self, value, index, commented, separator, heading): - if commented: - yield value, COMMENT - elif separator: - yield value, SEPARATOR - elif heading: - yield value, HEADING - else: - for value, token in self._table.tokenize(value, index): - yield value, token - - -class RowSplitter: - _space_splitter = re.compile('( {2,})') - _pipe_splitter = re.compile(r'((?:^| +)\|(?: +|$))') - - def split(self, row): - splitter = (row.startswith('| ') and self._split_from_pipes - or self._split_from_spaces) - for value in splitter(row): - yield value - yield '\n' - - def _split_from_spaces(self, row): - yield '' # Start with (pseudo)separator similarly as with pipes - for value in self._space_splitter.split(row): - yield value - - def _split_from_pipes(self, row): - _, separator, rest = self._pipe_splitter.split(row, 1) - yield separator - while self._pipe_splitter.search(rest): - cell, separator, rest = self._pipe_splitter.split(rest, 1) - yield cell - yield separator - yield rest - - -class Tokenizer: - _tokens = None - - def __init__(self): - self._index = 0 - - def tokenize(self, value): - values_and_tokens = self._tokenize(value, self._index) - self._index += 1 - if isinstance(values_and_tokens, type(Token)): - values_and_tokens = [(value, values_and_tokens)] - return values_and_tokens - - def _tokenize(self, value, index): - index = min(index, len(self._tokens) - 1) - return self._tokens[index] - - def _is_assign(self, value): - if value.endswith('='): - value = value[:-1].strip() - var = VariableSplitter(value, identifiers='$@&') - return var.start == 0 and var.end == len(value) - - -class Comment(Tokenizer): - _tokens = (COMMENT,) - - -class Setting(Tokenizer): - _tokens = (SETTING, ARGUMENT) - _keyword_settings = ('suitesetup', 'suiteprecondition', 'suiteteardown', - 'suitepostcondition', 'testsetup', 'testprecondition', - 'testteardown', 'testpostcondition', 'testtemplate') - _import_settings = ('library', 'resource', 'variables') - _other_settings = ('documentation', 'metadata', 'forcetags', 'defaulttags', - 'testtimeout') - _custom_tokenizer = None - - def __init__(self, template_setter=None): - Tokenizer.__init__(self) - self._template_setter = template_setter - - def _tokenize(self, value, index): - if index == 1 and self._template_setter: - self._template_setter(value) - if index == 0: - normalized = normalize(value) - if normalized in self._keyword_settings: - self._custom_tokenizer = KeywordCall(support_assign=False) - elif normalized in self._import_settings: - self._custom_tokenizer = ImportSetting() - elif normalized not in self._other_settings: - return ERROR - elif self._custom_tokenizer: - return self._custom_tokenizer.tokenize(value) - return Tokenizer._tokenize(self, value, index) - - -class ImportSetting(Tokenizer): - _tokens = (IMPORT, ARGUMENT) - - -class TestCaseSetting(Setting): - _keyword_settings = ('setup', 'precondition', 'teardown', 'postcondition', - 'template') - _import_settings = () - _other_settings = ('documentation', 'tags', 'timeout') - - def _tokenize(self, value, index): - if index == 0: - type = 
Setting._tokenize(self, value[1:-1], index) - return [('[', SYNTAX), (value[1:-1], type), (']', SYNTAX)] - return Setting._tokenize(self, value, index) - - -class KeywordSetting(TestCaseSetting): - _keyword_settings = ('teardown',) - _other_settings = ('documentation', 'arguments', 'return', 'timeout', 'tags') - - -class Variable(Tokenizer): - _tokens = (SYNTAX, ARGUMENT) - - def _tokenize(self, value, index): - if index == 0 and not self._is_assign(value): - return ERROR - return Tokenizer._tokenize(self, value, index) - - -class KeywordCall(Tokenizer): - _tokens = (KEYWORD, ARGUMENT) - - def __init__(self, support_assign=True): - Tokenizer.__init__(self) - self._keyword_found = not support_assign - self._assigns = 0 - - def _tokenize(self, value, index): - if not self._keyword_found and self._is_assign(value): - self._assigns += 1 - return SYNTAX # VariableTokenizer tokenizes this later. - if self._keyword_found: - return Tokenizer._tokenize(self, value, index - self._assigns) - self._keyword_found = True - return GherkinTokenizer().tokenize(value, KEYWORD) - - -class GherkinTokenizer: - _gherkin_prefix = re.compile('^(Given|When|Then|And) ', re.IGNORECASE) - - def tokenize(self, value, token): - match = self._gherkin_prefix.match(value) - if not match: - return [(value, token)] - end = match.end() - return [(value[:end], GHERKIN), (value[end:], token)] - - -class TemplatedKeywordCall(Tokenizer): - _tokens = (ARGUMENT,) - - -class ForLoop(Tokenizer): - - def __init__(self): - Tokenizer.__init__(self) - self._in_arguments = False - - def _tokenize(self, value, index): - token = self._in_arguments and ARGUMENT or SYNTAX - if value.upper() in ('IN', 'IN RANGE'): - self._in_arguments = True - return token - - -class _Table: - _tokenizer_class = None - - def __init__(self, prev_tokenizer=None): - self._tokenizer = self._tokenizer_class() - self._prev_tokenizer = prev_tokenizer - self._prev_values_on_row = [] - - def tokenize(self, value, index): - if self._continues(value, index): - self._tokenizer = self._prev_tokenizer - yield value, SYNTAX - else: - for value_and_token in self._tokenize(value, index): - yield value_and_token - self._prev_values_on_row.append(value) - - def _continues(self, value, index): - return value == '...' 
and all(self._is_empty(t) - for t in self._prev_values_on_row) - - def _is_empty(self, value): - return value in ('', '\\') - - def _tokenize(self, value, index): - return self._tokenizer.tokenize(value) - - def end_row(self): - self.__init__(prev_tokenizer=self._tokenizer) - - -class UnknownTable(_Table): - _tokenizer_class = Comment - - def _continues(self, value, index): - return False - - -class VariableTable(_Table): - _tokenizer_class = Variable - - -class SettingTable(_Table): - _tokenizer_class = Setting - - def __init__(self, template_setter, prev_tokenizer=None): - _Table.__init__(self, prev_tokenizer) - self._template_setter = template_setter - - def _tokenize(self, value, index): - if index == 0 and normalize(value) == 'testtemplate': - self._tokenizer = Setting(self._template_setter) - return _Table._tokenize(self, value, index) - - def end_row(self): - self.__init__(self._template_setter, prev_tokenizer=self._tokenizer) - - -class TestCaseTable(_Table): - _setting_class = TestCaseSetting - _test_template = None - _default_template = None - - @property - def _tokenizer_class(self): - if self._test_template or (self._default_template and - self._test_template is not False): - return TemplatedKeywordCall - return KeywordCall - - def _continues(self, value, index): - return index > 0 and _Table._continues(self, value, index) - - def _tokenize(self, value, index): - if index == 0: - if value: - self._test_template = None - return GherkinTokenizer().tokenize(value, TC_KW_NAME) - if index == 1 and self._is_setting(value): - if self._is_template(value): - self._test_template = False - self._tokenizer = self._setting_class(self.set_test_template) - else: - self._tokenizer = self._setting_class() - if index == 1 and self._is_for_loop(value): - self._tokenizer = ForLoop() - if index == 1 and self._is_empty(value): - return [(value, SYNTAX)] - return _Table._tokenize(self, value, index) - - def _is_setting(self, value): - return value.startswith('[') and value.endswith(']') - - def _is_template(self, value): - return normalize(value) == '[template]' - - def _is_for_loop(self, value): - return value.startswith(':') and normalize(value, remove=':') == 'for' - - def set_test_template(self, template): - self._test_template = self._is_template_set(template) - - def set_default_template(self, template): - self._default_template = self._is_template_set(template) - - def _is_template_set(self, template): - return normalize(template) not in ('', '\\', 'none', '${empty}') - - -class KeywordTable(TestCaseTable): - _tokenizer_class = KeywordCall - _setting_class = KeywordSetting - - def _is_template(self, value): - return False - - -# Following code copied directly from Robot Framework 2.7.5. 
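-# VariableSplitter locates a variable opened by an identifier character (e.g. '$@%&' in VariableTokenizer) followed by '{', recording its start/end offsets and any trailing [index] access.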
- -class VariableSplitter: - - def __init__(self, string, identifiers): - self.identifier = None - self.base = None - self.index = None - self.start = -1 - self.end = -1 - self._identifiers = identifiers - self._may_have_internal_variables = False - try: - self._split(string) - except ValueError: - pass - else: - self._finalize() - - def get_replaced_base(self, variables): - if self._may_have_internal_variables: - return variables.replace_string(self.base) - return self.base - - def _finalize(self): - self.identifier = self._variable_chars[0] - self.base = ''.join(self._variable_chars[2:-1]) - self.end = self.start + len(self._variable_chars) - if self._has_list_or_dict_variable_index(): - self.index = ''.join(self._list_and_dict_variable_index_chars[1:-1]) - self.end += len(self._list_and_dict_variable_index_chars) - - def _has_list_or_dict_variable_index(self): - return self._list_and_dict_variable_index_chars\ - and self._list_and_dict_variable_index_chars[-1] == ']' - - def _split(self, string): - start_index, max_index = self._find_variable(string) - self.start = start_index - self._open_curly = 1 - self._state = self._variable_state - self._variable_chars = [string[start_index], '{'] - self._list_and_dict_variable_index_chars = [] - self._string = string - start_index += 2 - for index, char in enumerate(string[start_index:]): - index += start_index # Giving start to enumerate only in Py 2.6+ - try: - self._state(char, index) - except StopIteration: - return - if index == max_index and not self._scanning_list_variable_index(): - return - - def _scanning_list_variable_index(self): - return self._state in [self._waiting_list_variable_index_state, - self._list_variable_index_state] - - def _find_variable(self, string): - max_end_index = string.rfind('}') - if max_end_index == -1: - raise ValueError('No variable end found') - if self._is_escaped(string, max_end_index): - return self._find_variable(string[:max_end_index]) - start_index = self._find_start_index(string, 1, max_end_index) - if start_index == -1: - raise ValueError('No variable start found') - return start_index, max_end_index - - def _find_start_index(self, string, start, end): - index = string.find('{', start, end) - 1 - if index < 0: - return -1 - if self._start_index_is_ok(string, index): - return index - return self._find_start_index(string, index+2, end) - - def _start_index_is_ok(self, string, index): - return string[index] in self._identifiers\ - and not self._is_escaped(string, index) - - def _is_escaped(self, string, index): - escaped = False - while index > 0 and string[index-1] == '\\': - index -= 1 - escaped = not escaped - return escaped - - def _variable_state(self, char, index): - self._variable_chars.append(char) - if char == '}' and not self._is_escaped(self._string, index): - self._open_curly -= 1 - if self._open_curly == 0: - if not self._is_list_or_dict_variable(): - raise StopIteration - self._state = self._waiting_list_variable_index_state - elif char in self._identifiers: - self._state = self._internal_variable_start_state - - def _is_list_or_dict_variable(self): - return self._variable_chars[0] in ('@','&') - - def _internal_variable_start_state(self, char, index): - self._state = self._variable_state - if char == '{': - self._variable_chars.append(char) - self._open_curly += 1 - self._may_have_internal_variables = True - else: - self._variable_state(char, index) - - def _waiting_list_variable_index_state(self, char, index): - if char != '[': - raise StopIteration - 
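-        # A '[' right after the closing '}' begins a list/dict index; anything else ends the scan via StopIteration above.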
self._list_and_dict_variable_index_chars.append(char) - self._state = self._list_variable_index_state - - def _list_variable_index_state(self, char, index): - self._list_and_dict_variable_index_chars.append(char) - if char == ']': - raise StopIteration +# -*- coding: utf-8 -*- +""" + pygments.lexers.robotframework + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Lexer for Robot Framework. + + :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +# Copyright 2012 Nokia Siemens Networks Oyj +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import re + +from pygments.lexer import Lexer +from pygments.token import Token + +__all__ = ['RobotFrameworkLexer'] + + +HEADING = Token.Generic.Heading +SETTING = Token.Keyword.Namespace +IMPORT = Token.Name.Namespace +TC_KW_NAME = Token.Generic.Subheading +KEYWORD = Token.Name.Function +ARGUMENT = Token.String +VARIABLE = Token.Name.Variable +COMMENT = Token.Comment +SEPARATOR = Token.Punctuation +SYNTAX = Token.Punctuation +GHERKIN = Token.Generic.Emph +ERROR = Token.Error + + +def normalize(string, remove=''): + string = string.lower() + for char in remove + ' ': + if char in string: + string = string.replace(char, '') + return string + + +class RobotFrameworkLexer(Lexer): + """ + For `Robot Framework `_ test data. + + Supports both space and pipe separated plain text formats. + + .. 
versionadded:: 1.6 + """ + name = 'RobotFramework' + aliases = ['robotframework'] + filenames = ['*.robot'] + mimetypes = ['text/x-robotframework'] + + def __init__(self, **options): + options['tabsize'] = 2 + options['encoding'] = 'UTF-8' + Lexer.__init__(self, **options) + + def get_tokens_unprocessed(self, text): + row_tokenizer = RowTokenizer() + var_tokenizer = VariableTokenizer() + index = 0 + for row in text.splitlines(): + for value, token in row_tokenizer.tokenize(row): + for value, token in var_tokenizer.tokenize(value, token): + if value: + yield index, token, str(value) + index += len(value) + + +class VariableTokenizer: + + def tokenize(self, string, token): + var = VariableSplitter(string, identifiers='$@%&') + if var.start < 0 or token in (COMMENT, ERROR): + yield string, token + return + for value, token in self._tokenize(var, string, token): + if value: + yield value, token + + def _tokenize(self, var, string, orig_token): + before = string[:var.start] + yield before, orig_token + yield var.identifier + '{', SYNTAX + yield from self.tokenize(var.base, VARIABLE) + yield '}', SYNTAX + if var.index: + yield '[', SYNTAX + yield from self.tokenize(var.index, VARIABLE) + yield ']', SYNTAX + yield from self.tokenize(string[var.end:], orig_token) + + +class RowTokenizer: + + def __init__(self): + self._table = UnknownTable() + self._splitter = RowSplitter() + testcases = TestCaseTable() + settings = SettingTable(testcases.set_default_template) + variables = VariableTable() + keywords = KeywordTable() + self._tables = {'settings': settings, 'setting': settings, + 'metadata': settings, + 'variables': variables, 'variable': variables, + 'testcases': testcases, 'testcase': testcases, + 'tasks': testcases, 'task': testcases, + 'keywords': keywords, 'keyword': keywords, + 'userkeywords': keywords, 'userkeyword': keywords} + + def tokenize(self, row): + commented = False + heading = False + for index, value in enumerate(self._splitter.split(row)): + # First value, and every second after that, is a separator. 
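+            # divmod(index-1, 2) turns the alternating value/separator stream into a cell index and a separator flag.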
+ index, separator = divmod(index-1, 2) + if value.startswith('#'): + commented = True + elif index == 0 and value.startswith('*'): + self._table = self._start_table(value) + heading = True + yield from self._tokenize(value, index, commented, + separator, heading) + self._table.end_row() + + def _start_table(self, header): + name = normalize(header, remove='*') + return self._tables.get(name, UnknownTable()) + + def _tokenize(self, value, index, commented, separator, heading): + if commented: + yield value, COMMENT + elif separator: + yield value, SEPARATOR + elif heading: + yield value, HEADING + else: + yield from self._table.tokenize(value, index) + + +class RowSplitter: + _space_splitter = re.compile('( {2,})') + _pipe_splitter = re.compile(r'((?:^| +)\|(?: +|$))') + + def split(self, row): + splitter = (row.startswith('| ') and self._split_from_pipes + or self._split_from_spaces) + yield from splitter(row) + yield '\n' + + def _split_from_spaces(self, row): + yield '' # Start with (pseudo)separator similarly as with pipes + yield from self._space_splitter.split(row) + + def _split_from_pipes(self, row): + _, separator, rest = self._pipe_splitter.split(row, 1) + yield separator + while self._pipe_splitter.search(rest): + cell, separator, rest = self._pipe_splitter.split(rest, 1) + yield cell + yield separator + yield rest + + +class Tokenizer: + _tokens = None + + def __init__(self): + self._index = 0 + + def tokenize(self, value): + values_and_tokens = self._tokenize(value, self._index) + self._index += 1 + if isinstance(values_and_tokens, type(Token)): + values_and_tokens = [(value, values_and_tokens)] + return values_and_tokens + + def _tokenize(self, value, index): + index = min(index, len(self._tokens) - 1) + return self._tokens[index] + + def _is_assign(self, value): + if value.endswith('='): + value = value[:-1].strip() + var = VariableSplitter(value, identifiers='$@&') + return var.start == 0 and var.end == len(value) + + +class Comment(Tokenizer): + _tokens = (COMMENT,) + + +class Setting(Tokenizer): + _tokens = (SETTING, ARGUMENT) + _keyword_settings = ('suitesetup', 'suiteprecondition', 'suiteteardown', + 'suitepostcondition', 'testsetup', 'tasksetup', 'testprecondition', + 'testteardown','taskteardown', 'testpostcondition', 'testtemplate', 'tasktemplate') + _import_settings = ('library', 'resource', 'variables') + _other_settings = ('documentation', 'metadata', 'forcetags', 'defaulttags', + 'testtimeout','tasktimeout') + _custom_tokenizer = None + + def __init__(self, template_setter=None): + Tokenizer.__init__(self) + self._template_setter = template_setter + + def _tokenize(self, value, index): + if index == 1 and self._template_setter: + self._template_setter(value) + if index == 0: + normalized = normalize(value) + if normalized in self._keyword_settings: + self._custom_tokenizer = KeywordCall(support_assign=False) + elif normalized in self._import_settings: + self._custom_tokenizer = ImportSetting() + elif normalized not in self._other_settings: + return ERROR + elif self._custom_tokenizer: + return self._custom_tokenizer.tokenize(value) + return Tokenizer._tokenize(self, value, index) + + +class ImportSetting(Tokenizer): + _tokens = (IMPORT, ARGUMENT) + + +class TestCaseSetting(Setting): + _keyword_settings = ('setup', 'precondition', 'teardown', 'postcondition', + 'template') + _import_settings = () + _other_settings = ('documentation', 'tags', 'timeout') + + def _tokenize(self, value, index): + if index == 0: + type = Setting._tokenize(self, value[1:-1], index) + 
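+            # Classify the name between the brackets, then re-emit the brackets themselves as SYNTAX tokens.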
return [('[', SYNTAX), (value[1:-1], type), (']', SYNTAX)] + return Setting._tokenize(self, value, index) + + +class KeywordSetting(TestCaseSetting): + _keyword_settings = ('teardown',) + _other_settings = ('documentation', 'arguments', 'return', 'timeout', 'tags') + + +class Variable(Tokenizer): + _tokens = (SYNTAX, ARGUMENT) + + def _tokenize(self, value, index): + if index == 0 and not self._is_assign(value): + return ERROR + return Tokenizer._tokenize(self, value, index) + + +class KeywordCall(Tokenizer): + _tokens = (KEYWORD, ARGUMENT) + + def __init__(self, support_assign=True): + Tokenizer.__init__(self) + self._keyword_found = not support_assign + self._assigns = 0 + + def _tokenize(self, value, index): + if not self._keyword_found and self._is_assign(value): + self._assigns += 1 + return SYNTAX # VariableTokenizer tokenizes this later. + if self._keyword_found: + return Tokenizer._tokenize(self, value, index - self._assigns) + self._keyword_found = True + return GherkinTokenizer().tokenize(value, KEYWORD) + + +class GherkinTokenizer: + _gherkin_prefix = re.compile('^(Given|When|Then|And) ', re.IGNORECASE) + + def tokenize(self, value, token): + match = self._gherkin_prefix.match(value) + if not match: + return [(value, token)] + end = match.end() + return [(value[:end], GHERKIN), (value[end:], token)] + + +class TemplatedKeywordCall(Tokenizer): + _tokens = (ARGUMENT,) + + +class ForLoop(Tokenizer): + + def __init__(self): + Tokenizer.__init__(self) + self._in_arguments = False + + def _tokenize(self, value, index): + token = self._in_arguments and ARGUMENT or SYNTAX + if value.upper() in ('IN', 'IN RANGE'): + self._in_arguments = True + return token + + +class _Table: + _tokenizer_class = None + + def __init__(self, prev_tokenizer=None): + self._tokenizer = self._tokenizer_class() + self._prev_tokenizer = prev_tokenizer + self._prev_values_on_row = [] + + def tokenize(self, value, index): + if self._continues(value, index): + self._tokenizer = self._prev_tokenizer + yield value, SYNTAX + else: + yield from self._tokenize(value, index) + self._prev_values_on_row.append(value) + + def _continues(self, value, index): + return value == '...' 
and all(self._is_empty(t) + for t in self._prev_values_on_row) + + def _is_empty(self, value): + return value in ('', '\\') + + def _tokenize(self, value, index): + return self._tokenizer.tokenize(value) + + def end_row(self): + self.__init__(prev_tokenizer=self._tokenizer) + + +class UnknownTable(_Table): + _tokenizer_class = Comment + + def _continues(self, value, index): + return False + + +class VariableTable(_Table): + _tokenizer_class = Variable + + +class SettingTable(_Table): + _tokenizer_class = Setting + + def __init__(self, template_setter, prev_tokenizer=None): + _Table.__init__(self, prev_tokenizer) + self._template_setter = template_setter + + def _tokenize(self, value, index): + if index == 0 and normalize(value) == 'testtemplate': + self._tokenizer = Setting(self._template_setter) + return _Table._tokenize(self, value, index) + + def end_row(self): + self.__init__(self._template_setter, prev_tokenizer=self._tokenizer) + + +class TestCaseTable(_Table): + _setting_class = TestCaseSetting + _test_template = None + _default_template = None + + @property + def _tokenizer_class(self): + if self._test_template or (self._default_template and + self._test_template is not False): + return TemplatedKeywordCall + return KeywordCall + + def _continues(self, value, index): + return index > 0 and _Table._continues(self, value, index) + + def _tokenize(self, value, index): + if index == 0: + if value: + self._test_template = None + return GherkinTokenizer().tokenize(value, TC_KW_NAME) + if index == 1 and self._is_setting(value): + if self._is_template(value): + self._test_template = False + self._tokenizer = self._setting_class(self.set_test_template) + else: + self._tokenizer = self._setting_class() + if index == 1 and self._is_for_loop(value): + self._tokenizer = ForLoop() + if index == 1 and self._is_empty(value): + return [(value, SYNTAX)] + return _Table._tokenize(self, value, index) + + def _is_setting(self, value): + return value.startswith('[') and value.endswith(']') + + def _is_template(self, value): + return normalize(value) == '[template]' + + def _is_for_loop(self, value): + return value.startswith(':') and normalize(value, remove=':') == 'for' + + def set_test_template(self, template): + self._test_template = self._is_template_set(template) + + def set_default_template(self, template): + self._default_template = self._is_template_set(template) + + def _is_template_set(self, template): + return normalize(template) not in ('', '\\', 'none', '${empty}') + + +class KeywordTable(TestCaseTable): + _tokenizer_class = KeywordCall + _setting_class = KeywordSetting + + def _is_template(self, value): + return False + + +# Following code copied directly from Robot Framework 2.7.5. 
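+# VariableSplitter locates a variable opened by an identifier character (e.g. '$@%&' in VariableTokenizer) followed by '{', recording its start/end offsets and any trailing [index] access.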
+ +class VariableSplitter: + + def __init__(self, string, identifiers): + self.identifier = None + self.base = None + self.index = None + self.start = -1 + self.end = -1 + self._identifiers = identifiers + self._may_have_internal_variables = False + try: + self._split(string) + except ValueError: + pass + else: + self._finalize() + + def get_replaced_base(self, variables): + if self._may_have_internal_variables: + return variables.replace_string(self.base) + return self.base + + def _finalize(self): + self.identifier = self._variable_chars[0] + self.base = ''.join(self._variable_chars[2:-1]) + self.end = self.start + len(self._variable_chars) + if self._has_list_or_dict_variable_index(): + self.index = ''.join(self._list_and_dict_variable_index_chars[1:-1]) + self.end += len(self._list_and_dict_variable_index_chars) + + def _has_list_or_dict_variable_index(self): + return self._list_and_dict_variable_index_chars\ + and self._list_and_dict_variable_index_chars[-1] == ']' + + def _split(self, string): + start_index, max_index = self._find_variable(string) + self.start = start_index + self._open_curly = 1 + self._state = self._variable_state + self._variable_chars = [string[start_index], '{'] + self._list_and_dict_variable_index_chars = [] + self._string = string + start_index += 2 + for index, char in enumerate(string[start_index:]): + index += start_index # Giving start to enumerate only in Py 2.6+ + try: + self._state(char, index) + except StopIteration: + return + if index == max_index and not self._scanning_list_variable_index(): + return + + def _scanning_list_variable_index(self): + return self._state in [self._waiting_list_variable_index_state, + self._list_variable_index_state] + + def _find_variable(self, string): + max_end_index = string.rfind('}') + if max_end_index == -1: + raise ValueError('No variable end found') + if self._is_escaped(string, max_end_index): + return self._find_variable(string[:max_end_index]) + start_index = self._find_start_index(string, 1, max_end_index) + if start_index == -1: + raise ValueError('No variable start found') + return start_index, max_end_index + + def _find_start_index(self, string, start, end): + index = string.find('{', start, end) - 1 + if index < 0: + return -1 + if self._start_index_is_ok(string, index): + return index + return self._find_start_index(string, index+2, end) + + def _start_index_is_ok(self, string, index): + return string[index] in self._identifiers\ + and not self._is_escaped(string, index) + + def _is_escaped(self, string, index): + escaped = False + while index > 0 and string[index-1] == '\\': + index -= 1 + escaped = not escaped + return escaped + + def _variable_state(self, char, index): + self._variable_chars.append(char) + if char == '}' and not self._is_escaped(self._string, index): + self._open_curly -= 1 + if self._open_curly == 0: + if not self._is_list_or_dict_variable(): + raise StopIteration + self._state = self._waiting_list_variable_index_state + elif char in self._identifiers: + self._state = self._internal_variable_start_state + + def _is_list_or_dict_variable(self): + return self._variable_chars[0] in ('@','&') + + def _internal_variable_start_state(self, char, index): + self._state = self._variable_state + if char == '{': + self._variable_chars.append(char) + self._open_curly += 1 + self._may_have_internal_variables = True + else: + self._variable_state(char, index) + + def _waiting_list_variable_index_state(self, char, index): + if char != '[': + raise StopIteration + 
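+        # A '[' right after the closing '}' begins a list/dict index; anything else ends the scan via StopIteration above.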
self._list_and_dict_variable_index_chars.append(char) + self._state = self._list_variable_index_state + + def _list_variable_index_state(self, char, index): + self._list_and_dict_variable_index_chars.append(char) + if char == ']': + raise StopIteration diff --git a/pygments/lexers/ruby.py b/pygments/lexers/ruby.py old mode 100644 new mode 100755 index 8bcbde6..a48c086 --- a/pygments/lexers/ruby.py +++ b/pygments/lexers/ruby.py @@ -1,519 +1,517 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.ruby - ~~~~~~~~~~~~~~~~~~~~ - - Lexers for Ruby and related languages. - - :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -import re - -from pygments.lexer import Lexer, RegexLexer, ExtendedRegexLexer, include, \ - bygroups, default, LexerContext, do_insertions, words -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ - Number, Punctuation, Error, Generic -from pygments.util import shebang_matches - -__all__ = ['RubyLexer', 'RubyConsoleLexer', 'FancyLexer'] - -line_re = re.compile('.*?\n') - - -RUBY_OPERATORS = ( - '*', '**', '-', '+', '-@', '+@', '/', '%', '&', '|', '^', '`', '~', - '[]', '[]=', '<<', '>>', '<', '<>', '<=>', '>', '>=', '==', '===' -) - - -class RubyLexer(ExtendedRegexLexer): - """ - For `Ruby `_ source code. - """ - - name = 'Ruby' - aliases = ['rb', 'ruby', 'duby'] - filenames = ['*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', - '*.rbx', '*.duby', 'Gemfile'] - mimetypes = ['text/x-ruby', 'application/x-ruby'] - - flags = re.DOTALL | re.MULTILINE - - def heredoc_callback(self, match, ctx): - # okay, this is the hardest part of parsing Ruby... - # match: 1 = <<[-~]?, 2 = quote? 3 = name 4 = quote? 5 = rest of line - - start = match.start(1) - yield start, Operator, match.group(1) # <<[-~]? - yield match.start(2), String.Heredoc, match.group(2) # quote ", ', ` - yield match.start(3), String.Delimiter, match.group(3) # heredoc name - yield match.start(4), String.Heredoc, match.group(4) # quote again - - heredocstack = ctx.__dict__.setdefault('heredocstack', []) - outermost = not bool(heredocstack) - heredocstack.append((match.group(1) in ('<<-', '<<~'), match.group(3))) - - ctx.pos = match.start(5) - ctx.end = match.end(5) - # this may find other heredocs - for i, t, v in self.get_tokens_unprocessed(context=ctx): - yield i, t, v - ctx.pos = match.end() - - if outermost: - # this is the outer heredoc again, now we can process them all - for tolerant, hdname in heredocstack: - lines = [] - for match in line_re.finditer(ctx.text, ctx.pos): - if tolerant: - check = match.group().strip() - else: - check = match.group().rstrip() - if check == hdname: - for amatch in lines: - yield amatch.start(), String.Heredoc, amatch.group() - yield match.start(), String.Delimiter, match.group() - ctx.pos = match.end() - break - else: - lines.append(match) - else: - # end of heredoc not found -- error! 
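-                    # no terminator line was found: flush the collected body as Error tokens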
- for amatch in lines: - yield amatch.start(), Error, amatch.group() - ctx.end = len(ctx.text) - del heredocstack[:] - - def gen_rubystrings_rules(): - def intp_regex_callback(self, match, ctx): - yield match.start(1), String.Regex, match.group(1) # begin - nctx = LexerContext(match.group(3), 0, ['interpolated-regex']) - for i, t, v in self.get_tokens_unprocessed(context=nctx): - yield match.start(3)+i, t, v - yield match.start(4), String.Regex, match.group(4) # end[mixounse]* - ctx.pos = match.end() - - def intp_string_callback(self, match, ctx): - yield match.start(1), String.Other, match.group(1) - nctx = LexerContext(match.group(3), 0, ['interpolated-string']) - for i, t, v in self.get_tokens_unprocessed(context=nctx): - yield match.start(3)+i, t, v - yield match.start(4), String.Other, match.group(4) # end - ctx.pos = match.end() - - states = {} - states['strings'] = [ - # easy ones - (r'\:@{0,2}[a-zA-Z_]\w*[!?]?', String.Symbol), - (words(RUBY_OPERATORS, prefix=r'\:@{0,2}'), String.Symbol), - (r":'(\\\\|\\'|[^'])*'", String.Symbol), - (r"'(\\\\|\\'|[^'])*'", String.Single), - (r':"', String.Symbol, 'simple-sym'), - (r'([a-zA-Z_]\w*)(:)(?!:)', - bygroups(String.Symbol, Punctuation)), # Since Ruby 1.9 - (r'"', String.Double, 'simple-string'), - (r'(?', '<>', 'ab'): - states[name+'-intp-string'] = [ - (r'\\[\\' + bracecc + ']', String.Other), - (lbrace, String.Other, '#push'), - (rbrace, String.Other, '#pop'), - include('string-intp-escaped'), - (r'[\\#' + bracecc + ']', String.Other), - (r'[^\\#' + bracecc + ']+', String.Other), - ] - states['strings'].append((r'%[QWx]?' + lbrace, String.Other, - name+'-intp-string')) - states[name+'-string'] = [ - (r'\\[\\' + bracecc + ']', String.Other), - (lbrace, String.Other, '#push'), - (rbrace, String.Other, '#pop'), - (r'[\\#' + bracecc + ']', String.Other), - (r'[^\\#' + bracecc + ']+', String.Other), - ] - states['strings'].append((r'%[qsw]' + lbrace, String.Other, - name+'-string')) - states[name+'-regex'] = [ - (r'\\[\\' + bracecc + ']', String.Regex), - (lbrace, String.Regex, '#push'), - (rbrace + '[mixounse]*', String.Regex, '#pop'), - include('string-intp'), - (r'[\\#' + bracecc + ']', String.Regex), - (r'[^\\#' + bracecc + ']+', String.Regex), - ] - states['strings'].append((r'%r' + lbrace, String.Regex, - name+'-regex')) - - # these must come after %! - states['strings'] += [ - # %r regex - (r'(%r([\W_]))((?:\\\2|(?!\2).)*)(\2[mixounse]*)', - intp_regex_callback), - # regular fancy strings with qsw - (r'%[qsw]([\W_])((?:\\\1|(?!\1).)*)\1', String.Other), - (r'(%[QWx]([\W_]))((?:\\\2|(?!\2).)*)(\2)', - intp_string_callback), - # special forms of fancy strings after operators or - # in method calls with braces - (r'(?<=[-+/*%=<>&!^|~,(])(\s*)(%([\t ])(?:(?:\\\3|(?!\3).)*)\3)', - bygroups(Text, String.Other, None)), - # and because of fixed width lookbehinds the whole thing a - # second time for line startings... 
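-            # (Python lookbehinds must be fixed-width, so the line-start case cannot join the lookbehind alternation above and is written out with '^'.)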
- (r'^(\s*)(%([\t ])(?:(?:\\\3|(?!\3).)*)\3)', - bygroups(Text, String.Other, None)), - # all regular fancy strings without qsw - (r'(%([^a-zA-Z0-9\s]))((?:\\\2|(?!\2).)*)(\2)', - intp_string_callback), - ] - - return states - - tokens = { - 'root': [ - (r'\A#!.+?$', Comment.Hashbang), - (r'#.*?$', Comment.Single), - (r'=begin\s.*?\n=end.*?$', Comment.Multiline), - # keywords - (words(( - 'BEGIN', 'END', 'alias', 'begin', 'break', 'case', 'defined?', - 'do', 'else', 'elsif', 'end', 'ensure', 'for', 'if', 'in', 'next', 'redo', - 'rescue', 'raise', 'retry', 'return', 'super', 'then', 'undef', - 'unless', 'until', 'when', 'while', 'yield'), suffix=r'\b'), - Keyword), - # start of function, class and module names - (r'(module)(\s+)([a-zA-Z_]\w*' - r'(?:::[a-zA-Z_]\w*)*)', - bygroups(Keyword, Text, Name.Namespace)), - (r'(def)(\s+)', bygroups(Keyword, Text), 'funcname'), - (r'def(?=[*%&^`~+-/\[<>=])', Keyword, 'funcname'), - (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'), - # special methods - (words(( - 'initialize', 'new', 'loop', 'include', 'extend', 'raise', 'attr_reader', - 'attr_writer', 'attr_accessor', 'attr', 'catch', 'throw', 'private', - 'module_function', 'public', 'protected', 'true', 'false', 'nil'), - suffix=r'\b'), - Keyword.Pseudo), - (r'(not|and|or)\b', Operator.Word), - (words(( - 'autoload', 'block_given', 'const_defined', 'eql', 'equal', 'frozen', 'include', - 'instance_of', 'is_a', 'iterator', 'kind_of', 'method_defined', 'nil', - 'private_method_defined', 'protected_method_defined', - 'public_method_defined', 'respond_to', 'tainted'), suffix=r'\?'), - Name.Builtin), - (r'(chomp|chop|exit|gsub|sub)!', Name.Builtin), - (words(( - 'Array', 'Float', 'Integer', 'String', '__id__', '__send__', 'abort', - 'ancestors', 'at_exit', 'autoload', 'binding', 'callcc', 'caller', - 'catch', 'chomp', 'chop', 'class_eval', 'class_variables', - 'clone', 'const_defined?', 'const_get', 'const_missing', 'const_set', - 'constants', 'display', 'dup', 'eval', 'exec', 'exit', 'extend', 'fail', 'fork', - 'format', 'freeze', 'getc', 'gets', 'global_variables', 'gsub', - 'hash', 'id', 'included_modules', 'inspect', 'instance_eval', - 'instance_method', 'instance_methods', - 'instance_variable_get', 'instance_variable_set', 'instance_variables', - 'lambda', 'load', 'local_variables', 'loop', - 'method', 'method_missing', 'methods', 'module_eval', 'name', - 'object_id', 'open', 'p', 'print', 'printf', 'private_class_method', - 'private_instance_methods', - 'private_methods', 'proc', 'protected_instance_methods', - 'protected_methods', 'public_class_method', - 'public_instance_methods', 'public_methods', - 'putc', 'puts', 'raise', 'rand', 'readline', 'readlines', 'require', - 'scan', 'select', 'self', 'send', 'set_trace_func', 'singleton_methods', 'sleep', - 'split', 'sprintf', 'srand', 'sub', 'syscall', 'system', 'taint', - 'test', 'throw', 'to_a', 'to_s', 'trace_var', 'trap', 'untaint', - 'untrace_var', 'warn'), prefix=r'(?~!:])|' - r'(?<=(?:\s|;)when\s)|' - r'(?<=(?:\s|;)or\s)|' - r'(?<=(?:\s|;)and\s)|' - r'(?<=\.index\s)|' - r'(?<=\.scan\s)|' - r'(?<=\.sub\s)|' - r'(?<=\.sub!\s)|' - r'(?<=\.gsub\s)|' - r'(?<=\.gsub!\s)|' - r'(?<=\.match\s)|' - r'(?<=(?:\s|;)if\s)|' - r'(?<=(?:\s|;)elsif\s)|' - r'(?<=^when\s)|' - r'(?<=^index\s)|' - r'(?<=^scan\s)|' - r'(?<=^sub\s)|' - r'(?<=^gsub\s)|' - r'(?<=^sub!\s)|' - r'(?<=^gsub!\s)|' - r'(?<=^match\s)|' - r'(?<=^if\s)|' - r'(?<=^elsif\s)' - r')(\s*)(/)', bygroups(Text, String.Regex), 'multiline-regex'), - # multiline regex (in method calls or 
subscripts) - (r'(?<=\(|,|\[)/', String.Regex, 'multiline-regex'), - # multiline regex (this time the funny no whitespace rule) - (r'(\s+)(/)(?![\s=])', bygroups(Text, String.Regex), - 'multiline-regex'), - # lex numbers and ignore following regular expressions which - # are division operators in fact (grrrr. i hate that. any - # better ideas?) - # since pygments 0.7 we also eat a "?" operator after numbers - # so that the char operator does not work. Chars are not allowed - # there so that you can use the ternary operator. - # stupid example: - # x>=0?n[x]:"" - (r'(0_?[0-7]+(?:_[0-7]+)*)(\s*)([/?])?', - bygroups(Number.Oct, Text, Operator)), - (r'(0x[0-9A-Fa-f]+(?:_[0-9A-Fa-f]+)*)(\s*)([/?])?', - bygroups(Number.Hex, Text, Operator)), - (r'(0b[01]+(?:_[01]+)*)(\s*)([/?])?', - bygroups(Number.Bin, Text, Operator)), - (r'([\d]+(?:_\d+)*)(\s*)([/?])?', - bygroups(Number.Integer, Text, Operator)), - # Names - (r'@@[a-zA-Z_]\w*', Name.Variable.Class), - (r'@[a-zA-Z_]\w*', Name.Variable.Instance), - (r'\$\w+', Name.Variable.Global), - (r'\$[!@&`\'+~=/\\,;.<>_*$?:"^-]', Name.Variable.Global), - (r'\$-[0adFiIlpvw]', Name.Variable.Global), - (r'::', Operator), - include('strings'), - # chars - (r'\?(\\[MC]-)*' # modifiers - r'(\\([\\abefnrstv#"\']|x[a-fA-F0-9]{1,2}|[0-7]{1,3})|\S)' - r'(?!\w)', - String.Char), - (r'[A-Z]\w+', Name.Constant), - # this is needed because ruby attributes can look - # like keywords (class) or like this: ` ?!? - (words(RUBY_OPERATORS, prefix=r'(\.|::)'), - bygroups(Operator, Name.Operator)), - (r'(\.|::)([a-zA-Z_]\w*[!?]?|[*%&^`~+\-/\[<>=])', - bygroups(Operator, Name)), - (r'[a-zA-Z_]\w*[!?]?', Name), - (r'(\[|\]|\*\*|<>?|>=|<=|<=>|=~|={3}|' - r'!~|&&?|\|\||\.{1,3})', Operator), - (r'[-+/*%=<>&!^|~]=?', Operator), - (r'[(){};,/?:\\]', Punctuation), - (r'\s+', Text) - ], - 'funcname': [ - (r'\(', Punctuation, 'defexpr'), - (r'(?:([a-zA-Z_]\w*)(\.))?' - r'([a-zA-Z_]\w*[!?]?|\*\*?|[-+]@?|' - r'[/%&|^`~]|\[\]=?|<<|>>|<=?>|>=?|===?)', - bygroups(Name.Class, Operator, Name.Function), '#pop'), - default('#pop') - ], - 'classname': [ - (r'\(', Punctuation, 'defexpr'), - (r'<<', Operator, '#pop'), - (r'[A-Z_]\w*', Name.Class, '#pop'), - default('#pop') - ], - 'defexpr': [ - (r'(\))(\.|::)?', bygroups(Punctuation, Operator), '#pop'), - (r'\(', Operator, '#push'), - include('root') - ], - 'in-intp': [ - (r'\{', String.Interpol, '#push'), - (r'\}', String.Interpol, '#pop'), - include('root'), - ], - 'string-intp': [ - (r'#\{', String.Interpol, 'in-intp'), - (r'#@@?[a-zA-Z_]\w*', String.Interpol), - (r'#\$[a-zA-Z_]\w*', String.Interpol) - ], - 'string-intp-escaped': [ - include('string-intp'), - (r'\\([\\abefnrstv#"\']|x[a-fA-F0-9]{1,2}|[0-7]{1,3})', - String.Escape) - ], - 'interpolated-regex': [ - include('string-intp'), - (r'[\\#]', String.Regex), - (r'[^\\#]+', String.Regex), - ], - 'interpolated-string': [ - include('string-intp'), - (r'[\\#]', String.Other), - (r'[^\\#]+', String.Other), - ], - 'multiline-regex': [ - include('string-intp'), - (r'\\\\', String.Regex), - (r'\\/', String.Regex), - (r'[\\#]', String.Regex), - (r'[^\\/#]+', String.Regex), - (r'/[mixounse]*', String.Regex, '#pop'), - ], - 'end-part': [ - (r'.+', Comment.Preproc, '#pop') - ] - } - tokens.update(gen_rubystrings_rules()) - - def analyse_text(text): - return shebang_matches(text, r'ruby(1\.\d)?') - - -class RubyConsoleLexer(Lexer): - """ - For Ruby interactive console (**irb**) output like: - - .. 
sourcecode:: rbcon - - irb(main):001:0> a = 1 - => 1 - irb(main):002:0> puts a - 1 - => nil - """ - name = 'Ruby irb session' - aliases = ['rbcon', 'irb'] - mimetypes = ['text/x-ruby-shellsession'] - - _prompt_re = re.compile(r'irb\([a-zA-Z_]\w*\):\d{3}:\d+[>*"\'] ' - r'|>> |\?> ') - - def get_tokens_unprocessed(self, text): - rblexer = RubyLexer(**self.options) - - curcode = '' - insertions = [] - for match in line_re.finditer(text): - line = match.group() - m = self._prompt_re.match(line) - if m is not None: - end = m.end() - insertions.append((len(curcode), - [(0, Generic.Prompt, line[:end])])) - curcode += line[end:] - else: - if curcode: - for item in do_insertions( - insertions, rblexer.get_tokens_unprocessed(curcode)): - yield item - curcode = '' - insertions = [] - yield match.start(), Generic.Output, line - if curcode: - for item in do_insertions( - insertions, rblexer.get_tokens_unprocessed(curcode)): - yield item - - -class FancyLexer(RegexLexer): - """ - Pygments Lexer For `Fancy `_. - - Fancy is a self-hosted, pure object-oriented, dynamic, - class-based, concurrent general-purpose programming language - running on Rubinius, the Ruby VM. - - .. versionadded:: 1.5 - """ - name = 'Fancy' - filenames = ['*.fy', '*.fancypack'] - aliases = ['fancy', 'fy'] - mimetypes = ['text/x-fancysrc'] - - tokens = { - # copied from PerlLexer: - 'balanced-regex': [ - (r'/(\\\\|\\/|[^/])*/[egimosx]*', String.Regex, '#pop'), - (r'!(\\\\|\\!|[^!])*![egimosx]*', String.Regex, '#pop'), - (r'\\(\\\\|[^\\])*\\[egimosx]*', String.Regex, '#pop'), - (r'\{(\\\\|\\\}|[^}])*\}[egimosx]*', String.Regex, '#pop'), - (r'<(\\\\|\\>|[^>])*>[egimosx]*', String.Regex, '#pop'), - (r'\[(\\\\|\\\]|[^\]])*\][egimosx]*', String.Regex, '#pop'), - (r'\((\\\\|\\\)|[^)])*\)[egimosx]*', String.Regex, '#pop'), - (r'@(\\\\|\\@|[^@])*@[egimosx]*', String.Regex, '#pop'), - (r'%(\\\\|\\%|[^%])*%[egimosx]*', String.Regex, '#pop'), - (r'\$(\\\\|\\\$|[^$])*\$[egimosx]*', String.Regex, '#pop'), - ], - 'root': [ - (r'\s+', Text), - - # balanced delimiters (copied from PerlLexer): - (r's\{(\\\\|\\\}|[^}])*\}\s*', String.Regex, 'balanced-regex'), - (r's<(\\\\|\\>|[^>])*>\s*', String.Regex, 'balanced-regex'), - (r's\[(\\\\|\\\]|[^\]])*\]\s*', String.Regex, 'balanced-regex'), - (r's\((\\\\|\\\)|[^)])*\)\s*', String.Regex, 'balanced-regex'), - (r'm?/(\\\\|\\/|[^/\n])*/[gcimosx]*', String.Regex), - (r'm(?=[/!\\{<\[(@%$])', String.Regex, 'balanced-regex'), - - # Comments - (r'#(.*?)\n', Comment.Single), - # Symbols - (r'\'([^\'\s\[\](){}]+|\[\])', String.Symbol), - # Multi-line DoubleQuotedString - (r'"""(\\\\|\\"|[^"])*"""', String), - # DoubleQuotedString - (r'"(\\\\|\\"|[^"])*"', String), - # keywords - (r'(def|class|try|catch|finally|retry|return|return_local|match|' - r'case|->|=>)\b', Keyword), - # constants - (r'(self|super|nil|false|true)\b', Name.Constant), - (r'[(){};,/?|:\\]', Punctuation), - # names - (words(( - 'Object', 'Array', 'Hash', 'Directory', 'File', 'Class', 'String', - 'Number', 'Enumerable', 'FancyEnumerable', 'Block', 'TrueClass', - 'NilClass', 'FalseClass', 'Tuple', 'Symbol', 'Stack', 'Set', - 'FancySpec', 'Method', 'Package', 'Range'), suffix=r'\b'), - Name.Builtin), - # functions - (r'[a-zA-Z](\w|[-+?!=*/^><%])*:', Name.Function), - # operators, must be below functions - (r'[-+*/~,<>=&!?%^\[\].$]+', Operator), - (r'[A-Z]\w*', Name.Constant), - (r'@[a-zA-Z_]\w*', Name.Variable.Instance), - (r'@@[a-zA-Z_]\w*', Name.Variable.Class), - ('@@?', Operator), - (r'[a-zA-Z_]\w*', Name), - # numbers - / checks are necessary 
to avoid mismarking regexes, - # see comment in RubyLexer - (r'(0[oO]?[0-7]+(?:_[0-7]+)*)(\s*)([/?])?', - bygroups(Number.Oct, Text, Operator)), - (r'(0[xX][0-9A-Fa-f]+(?:_[0-9A-Fa-f]+)*)(\s*)([/?])?', - bygroups(Number.Hex, Text, Operator)), - (r'(0[bB][01]+(?:_[01]+)*)(\s*)([/?])?', - bygroups(Number.Bin, Text, Operator)), - (r'([\d]+(?:_\d+)*)(\s*)([/?])?', - bygroups(Number.Integer, Text, Operator)), - (r'\d+([eE][+-]?[0-9]+)|\d+\.\d+([eE][+-]?[0-9]+)?', Number.Float), - (r'\d+', Number.Integer) - ] - } +# -*- coding: utf-8 -*- +""" + pygments.lexers.ruby + ~~~~~~~~~~~~~~~~~~~~ + + Lexers for Ruby and related languages. + + :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import Lexer, RegexLexer, ExtendedRegexLexer, include, \ + bygroups, default, LexerContext, do_insertions, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Error, Generic +from pygments.util import shebang_matches + +__all__ = ['RubyLexer', 'RubyConsoleLexer', 'FancyLexer'] + +line_re = re.compile('.*?\n') + + +RUBY_OPERATORS = ( + '*', '**', '-', '+', '-@', '+@', '/', '%', '&', '|', '^', '`', '~', + '[]', '[]=', '<<', '>>', '<', '<>', '<=>', '>', '>=', '==', '===' +) + + +class RubyLexer(ExtendedRegexLexer): + """ + For `Ruby `_ source code. + """ + + name = 'Ruby' + aliases = ['rb', 'ruby', 'duby'] + filenames = ['*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', + '*.rbx', '*.duby', 'Gemfile'] + mimetypes = ['text/x-ruby', 'application/x-ruby'] + + flags = re.DOTALL | re.MULTILINE + + def heredoc_callback(self, match, ctx): + # okay, this is the hardest part of parsing Ruby... + # match: 1 = <<[-~]?, 2 = quote? 3 = name 4 = quote? 5 = rest of line + + start = match.start(1) + yield start, Operator, match.group(1) # <<[-~]? + yield match.start(2), String.Heredoc, match.group(2) # quote ", ', ` + yield match.start(3), String.Delimiter, match.group(3) # heredoc name + yield match.start(4), String.Heredoc, match.group(4) # quote again + + heredocstack = ctx.__dict__.setdefault('heredocstack', []) + outermost = not bool(heredocstack) + heredocstack.append((match.group(1) in ('<<-', '<<~'), match.group(3))) + + ctx.pos = match.start(5) + ctx.end = match.end(5) + # this may find other heredocs + yield from self.get_tokens_unprocessed(context=ctx) + ctx.pos = match.end() + + if outermost: + # this is the outer heredoc again, now we can process them all + for tolerant, hdname in heredocstack: + lines = [] + for match in line_re.finditer(ctx.text, ctx.pos): + if tolerant: + check = match.group().strip() + else: + check = match.group().rstrip() + if check == hdname: + for amatch in lines: + yield amatch.start(), String.Heredoc, amatch.group() + yield match.start(), String.Delimiter, match.group() + ctx.pos = match.end() + break + else: + lines.append(match) + else: + # end of heredoc not found -- error! 
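+                        # Illustrative note (editorial addition, not upstream code): the
+                        # `tolerant` flag recorded above is True for <<- and <<~ heredocs,
+                        # whose terminator may be indented, e.g.
+                        #     foo = <<~EOS
+                        #       hello
+                        #     EOS
+                        # while a plain <<EOS terminator must sit at column 0 -- hence the
+                        # .strip() vs. .rstrip() check. When no terminator line is found,
+                        # the remaining lines are emitted as Error tokens below.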
+ for amatch in lines: + yield amatch.start(), Error, amatch.group() + ctx.end = len(ctx.text) + del heredocstack[:] + + def gen_rubystrings_rules(): + def intp_regex_callback(self, match, ctx): + yield match.start(1), String.Regex, match.group(1) # begin + nctx = LexerContext(match.group(3), 0, ['interpolated-regex']) + for i, t, v in self.get_tokens_unprocessed(context=nctx): + yield match.start(3)+i, t, v + yield match.start(4), String.Regex, match.group(4) # end[mixounse]* + ctx.pos = match.end() + + def intp_string_callback(self, match, ctx): + yield match.start(1), String.Other, match.group(1) + nctx = LexerContext(match.group(3), 0, ['interpolated-string']) + for i, t, v in self.get_tokens_unprocessed(context=nctx): + yield match.start(3)+i, t, v + yield match.start(4), String.Other, match.group(4) # end + ctx.pos = match.end() + + states = {} + states['strings'] = [ + # easy ones + (r'\:@{0,2}[a-zA-Z_]\w*[!?]?', String.Symbol), + (words(RUBY_OPERATORS, prefix=r'\:@{0,2}'), String.Symbol), + (r":'(\\\\|\\'|[^'])*'", String.Symbol), + (r':"', String.Symbol, 'simple-sym'), + (r'([a-zA-Z_]\w*)(:)(?!:)', + bygroups(String.Symbol, Punctuation)), # Since Ruby 1.9 + (r'"', String.Double, 'simple-string-double'), + (r"'", String.Single, 'simple-string-single'), + (r'(?', '<>', 'ab'): + states[name+'-intp-string'] = [ + (r'\\[\\' + bracecc + ']', String.Other), + (lbrace, String.Other, '#push'), + (rbrace, String.Other, '#pop'), + include('string-intp-escaped'), + (r'[\\#' + bracecc + ']', String.Other), + (r'[^\\#' + bracecc + ']+', String.Other), + ] + states['strings'].append((r'%[QWx]?' + lbrace, String.Other, + name+'-intp-string')) + states[name+'-string'] = [ + (r'\\[\\' + bracecc + ']', String.Other), + (lbrace, String.Other, '#push'), + (rbrace, String.Other, '#pop'), + (r'[\\#' + bracecc + ']', String.Other), + (r'[^\\#' + bracecc + ']+', String.Other), + ] + states['strings'].append((r'%[qsw]' + lbrace, String.Other, + name+'-string')) + states[name+'-regex'] = [ + (r'\\[\\' + bracecc + ']', String.Regex), + (lbrace, String.Regex, '#push'), + (rbrace + '[mixounse]*', String.Regex, '#pop'), + include('string-intp'), + (r'[\\#' + bracecc + ']', String.Regex), + (r'[^\\#' + bracecc + ']+', String.Regex), + ] + states['strings'].append((r'%r' + lbrace, String.Regex, + name+'-regex')) + + # these must come after %! + states['strings'] += [ + # %r regex + (r'(%r([\W_]))((?:\\\2|(?!\2).)*)(\2[mixounse]*)', + intp_regex_callback), + # regular fancy strings with qsw + (r'%[qsw]([\W_])((?:\\\1|(?!\1).)*)\1', String.Other), + (r'(%[QWx]([\W_]))((?:\\\2|(?!\2).)*)(\2)', + intp_string_callback), + # special forms of fancy strings after operators or + # in method calls with braces + (r'(?<=[-+/*%=<>&!^|~,(])(\s*)(%([\t ])(?:(?:\\\3|(?!\3).)*)\3)', + bygroups(Text, String.Other, None)), + # and because of fixed width lookbehinds the whole thing a + # second time for line startings... 
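+            # Illustrative note (editorial addition): Python's re module only
+            # supports fixed-width lookbehinds, so "start of line" cannot be
+            # folded into the lookbehind alternatives of the rule above; the
+            # rule below therefore repeats the pattern anchored at ^. Both
+            # rules target whitespace-delimited percent literals such as
+            #     x = % foo        (after an operator -> rule above)
+            #     % foo            (at line start     -> rule below)
+            # where "% foo " is the string "foo" with a space as delimiter.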
+ (r'^(\s*)(%([\t ])(?:(?:\\\3|(?!\3).)*)\3)', + bygroups(Text, String.Other, None)), + # all regular fancy strings without qsw + (r'(%([^a-zA-Z0-9\s]))((?:\\\2|(?!\2).)*)(\2)', + intp_string_callback), + ] + + return states + + tokens = { + 'root': [ + (r'\A#!.+?$', Comment.Hashbang), + (r'#.*?$', Comment.Single), + (r'=begin\s.*?\n=end.*?$', Comment.Multiline), + # keywords + (words(( + 'BEGIN', 'END', 'alias', 'begin', 'break', 'case', 'defined?', + 'do', 'else', 'elsif', 'end', 'ensure', 'for', 'if', 'in', 'next', 'redo', + 'rescue', 'raise', 'retry', 'return', 'super', 'then', 'undef', + 'unless', 'until', 'when', 'while', 'yield'), suffix=r'\b'), + Keyword), + # start of function, class and module names + (r'(module)(\s+)([a-zA-Z_]\w*' + r'(?:::[a-zA-Z_]\w*)*)', + bygroups(Keyword, Text, Name.Namespace)), + (r'(def)(\s+)', bygroups(Keyword, Text), 'funcname'), + (r'def(?=[*%&^`~+-/\[<>=])', Keyword, 'funcname'), + (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'), + # special methods + (words(( + 'initialize', 'new', 'loop', 'include', 'extend', 'raise', 'attr_reader', + 'attr_writer', 'attr_accessor', 'attr', 'catch', 'throw', 'private', + 'module_function', 'public', 'protected', 'true', 'false', 'nil'), + suffix=r'\b'), + Keyword.Pseudo), + (r'(not|and|or)\b', Operator.Word), + (words(( + 'autoload', 'block_given', 'const_defined', 'eql', 'equal', 'frozen', 'include', + 'instance_of', 'is_a', 'iterator', 'kind_of', 'method_defined', 'nil', + 'private_method_defined', 'protected_method_defined', + 'public_method_defined', 'respond_to', 'tainted'), suffix=r'\?'), + Name.Builtin), + (r'(chomp|chop|exit|gsub|sub)!', Name.Builtin), + (words(( + 'Array', 'Float', 'Integer', 'String', '__id__', '__send__', 'abort', + 'ancestors', 'at_exit', 'autoload', 'binding', 'callcc', 'caller', + 'catch', 'chomp', 'chop', 'class_eval', 'class_variables', + 'clone', 'const_defined?', 'const_get', 'const_missing', 'const_set', + 'constants', 'display', 'dup', 'eval', 'exec', 'exit', 'extend', 'fail', 'fork', + 'format', 'freeze', 'getc', 'gets', 'global_variables', 'gsub', + 'hash', 'id', 'included_modules', 'inspect', 'instance_eval', + 'instance_method', 'instance_methods', + 'instance_variable_get', 'instance_variable_set', 'instance_variables', + 'lambda', 'load', 'local_variables', 'loop', + 'method', 'method_missing', 'methods', 'module_eval', 'name', + 'object_id', 'open', 'p', 'print', 'printf', 'private_class_method', + 'private_instance_methods', + 'private_methods', 'proc', 'protected_instance_methods', + 'protected_methods', 'public_class_method', + 'public_instance_methods', 'public_methods', + 'putc', 'puts', 'raise', 'rand', 'readline', 'readlines', 'require', + 'scan', 'select', 'self', 'send', 'set_trace_func', 'singleton_methods', 'sleep', + 'split', 'sprintf', 'srand', 'sub', 'syscall', 'system', 'taint', + 'test', 'throw', 'to_a', 'to_s', 'trace_var', 'trap', 'untaint', + 'untrace_var', 'warn'), prefix=r'(?~!:])|' + r'(?<=(?:\s|;)when\s)|' + r'(?<=(?:\s|;)or\s)|' + r'(?<=(?:\s|;)and\s)|' + r'(?<=\.index\s)|' + r'(?<=\.scan\s)|' + r'(?<=\.sub\s)|' + r'(?<=\.sub!\s)|' + r'(?<=\.gsub\s)|' + r'(?<=\.gsub!\s)|' + r'(?<=\.match\s)|' + r'(?<=(?:\s|;)if\s)|' + r'(?<=(?:\s|;)elsif\s)|' + r'(?<=^when\s)|' + r'(?<=^index\s)|' + r'(?<=^scan\s)|' + r'(?<=^sub\s)|' + r'(?<=^gsub\s)|' + r'(?<=^sub!\s)|' + r'(?<=^gsub!\s)|' + r'(?<=^match\s)|' + r'(?<=^if\s)|' + r'(?<=^elsif\s)' + r')(\s*)(/)', bygroups(Text, String.Regex), 'multiline-regex'), + # multiline regex (in method calls or 
subscripts) + (r'(?<=\(|,|\[)/', String.Regex, 'multiline-regex'), + # multiline regex (this time the funny no whitespace rule) + (r'(\s+)(/)(?![\s=])', bygroups(Text, String.Regex), + 'multiline-regex'), + # lex numbers and ignore following regular expressions which + # are division operators in fact (grrrr. i hate that. any + # better ideas?) + # since pygments 0.7 we also eat a "?" operator after numbers + # so that the char operator does not work. Chars are not allowed + # there so that you can use the ternary operator. + # stupid example: + # x>=0?n[x]:"" + (r'(0_?[0-7]+(?:_[0-7]+)*)(\s*)([/?])?', + bygroups(Number.Oct, Text, Operator)), + (r'(0x[0-9A-Fa-f]+(?:_[0-9A-Fa-f]+)*)(\s*)([/?])?', + bygroups(Number.Hex, Text, Operator)), + (r'(0b[01]+(?:_[01]+)*)(\s*)([/?])?', + bygroups(Number.Bin, Text, Operator)), + (r'([\d]+(?:_\d+)*)(\s*)([/?])?', + bygroups(Number.Integer, Text, Operator)), + # Names + (r'@@[a-zA-Z_]\w*', Name.Variable.Class), + (r'@[a-zA-Z_]\w*', Name.Variable.Instance), + (r'\$\w+', Name.Variable.Global), + (r'\$[!@&`\'+~=/\\,;.<>_*$?:"^-]', Name.Variable.Global), + (r'\$-[0adFiIlpvw]', Name.Variable.Global), + (r'::', Operator), + include('strings'), + # chars + (r'\?(\\[MC]-)*' # modifiers + r'(\\([\\abefnrstv#"\']|x[a-fA-F0-9]{1,2}|[0-7]{1,3})|\S)' + r'(?!\w)', + String.Char), + (r'[A-Z]\w+', Name.Constant), + # this is needed because ruby attributes can look + # like keywords (class) or like this: ` ?!? + (words(RUBY_OPERATORS, prefix=r'(\.|::)'), + bygroups(Operator, Name.Operator)), + (r'(\.|::)([a-zA-Z_]\w*[!?]?|[*%&^`~+\-/\[<>=])', + bygroups(Operator, Name)), + (r'[a-zA-Z_]\w*[!?]?', Name), + (r'(\[|\]|\*\*|<>?|>=|<=|<=>|=~|={3}|' + r'!~|&&?|\|\||\.{1,3})', Operator), + (r'[-+/*%=<>&!^|~]=?', Operator), + (r'[(){};,/?:\\]', Punctuation), + (r'\s+', Text) + ], + 'funcname': [ + (r'\(', Punctuation, 'defexpr'), + (r'(?:([a-zA-Z_]\w*)(\.))?' + r'([a-zA-Z_]\w*[!?]?|\*\*?|[-+]@?|' + r'[/%&|^`~]|\[\]=?|<<|>>|<=?>|>=?|===?)', + bygroups(Name.Class, Operator, Name.Function), '#pop'), + default('#pop') + ], + 'classname': [ + (r'\(', Punctuation, 'defexpr'), + (r'<<', Operator, '#pop'), + (r'[A-Z_]\w*', Name.Class, '#pop'), + default('#pop') + ], + 'defexpr': [ + (r'(\))(\.|::)?', bygroups(Punctuation, Operator), '#pop'), + (r'\(', Operator, '#push'), + include('root') + ], + 'in-intp': [ + (r'\{', String.Interpol, '#push'), + (r'\}', String.Interpol, '#pop'), + include('root'), + ], + 'string-intp': [ + (r'#\{', String.Interpol, 'in-intp'), + (r'#@@?[a-zA-Z_]\w*', String.Interpol), + (r'#\$[a-zA-Z_]\w*', String.Interpol) + ], + 'string-intp-escaped': [ + include('string-intp'), + (r'\\([\\abefnrstv#"\']|x[a-fA-F0-9]{1,2}|[0-7]{1,3})', + String.Escape) + ], + 'interpolated-regex': [ + include('string-intp'), + (r'[\\#]', String.Regex), + (r'[^\\#]+', String.Regex), + ], + 'interpolated-string': [ + include('string-intp'), + (r'[\\#]', String.Other), + (r'[^\\#]+', String.Other), + ], + 'multiline-regex': [ + include('string-intp'), + (r'\\\\', String.Regex), + (r'\\/', String.Regex), + (r'[\\#]', String.Regex), + (r'[^\\/#]+', String.Regex), + (r'/[mixounse]*', String.Regex, '#pop'), + ], + 'end-part': [ + (r'.+', Comment.Preproc, '#pop') + ] + } + tokens.update(gen_rubystrings_rules()) + + def analyse_text(text): + return shebang_matches(text, r'ruby(1\.\d)?') + + +class RubyConsoleLexer(Lexer): + """ + For Ruby interactive console (**irb**) output like: + + .. 
sourcecode:: rbcon + + irb(main):001:0> a = 1 + => 1 + irb(main):002:0> puts a + 1 + => nil + """ + name = 'Ruby irb session' + aliases = ['rbcon', 'irb'] + mimetypes = ['text/x-ruby-shellsession'] + + _prompt_re = re.compile(r'irb\([a-zA-Z_]\w*\):\d{3}:\d+[>*"\'] ' + r'|>> |\?> ') + + def get_tokens_unprocessed(self, text): + rblexer = RubyLexer(**self.options) + + curcode = '' + insertions = [] + for match in line_re.finditer(text): + line = match.group() + m = self._prompt_re.match(line) + if m is not None: + end = m.end() + insertions.append((len(curcode), + [(0, Generic.Prompt, line[:end])])) + curcode += line[end:] + else: + if curcode: + yield from do_insertions( + insertions, rblexer.get_tokens_unprocessed(curcode)) + curcode = '' + insertions = [] + yield match.start(), Generic.Output, line + if curcode: + yield from do_insertions( + insertions, rblexer.get_tokens_unprocessed(curcode)) + + +class FancyLexer(RegexLexer): + """ + Pygments Lexer For `Fancy `_. + + Fancy is a self-hosted, pure object-oriented, dynamic, + class-based, concurrent general-purpose programming language + running on Rubinius, the Ruby VM. + + .. versionadded:: 1.5 + """ + name = 'Fancy' + filenames = ['*.fy', '*.fancypack'] + aliases = ['fancy', 'fy'] + mimetypes = ['text/x-fancysrc'] + + tokens = { + # copied from PerlLexer: + 'balanced-regex': [ + (r'/(\\\\|\\/|[^/])*/[egimosx]*', String.Regex, '#pop'), + (r'!(\\\\|\\!|[^!])*![egimosx]*', String.Regex, '#pop'), + (r'\\(\\\\|[^\\])*\\[egimosx]*', String.Regex, '#pop'), + (r'\{(\\\\|\\\}|[^}])*\}[egimosx]*', String.Regex, '#pop'), + (r'<(\\\\|\\>|[^>])*>[egimosx]*', String.Regex, '#pop'), + (r'\[(\\\\|\\\]|[^\]])*\][egimosx]*', String.Regex, '#pop'), + (r'\((\\\\|\\\)|[^)])*\)[egimosx]*', String.Regex, '#pop'), + (r'@(\\\\|\\@|[^@])*@[egimosx]*', String.Regex, '#pop'), + (r'%(\\\\|\\%|[^%])*%[egimosx]*', String.Regex, '#pop'), + (r'\$(\\\\|\\\$|[^$])*\$[egimosx]*', String.Regex, '#pop'), + ], + 'root': [ + (r'\s+', Text), + + # balanced delimiters (copied from PerlLexer): + (r's\{(\\\\|\\\}|[^}])*\}\s*', String.Regex, 'balanced-regex'), + (r's<(\\\\|\\>|[^>])*>\s*', String.Regex, 'balanced-regex'), + (r's\[(\\\\|\\\]|[^\]])*\]\s*', String.Regex, 'balanced-regex'), + (r's\((\\\\|\\\)|[^)])*\)\s*', String.Regex, 'balanced-regex'), + (r'm?/(\\\\|\\/|[^/\n])*/[gcimosx]*', String.Regex), + (r'm(?=[/!\\{<\[(@%$])', String.Regex, 'balanced-regex'), + + # Comments + (r'#(.*?)\n', Comment.Single), + # Symbols + (r'\'([^\'\s\[\](){}]+|\[\])', String.Symbol), + # Multi-line DoubleQuotedString + (r'"""(\\\\|\\"|[^"])*"""', String), + # DoubleQuotedString + (r'"(\\\\|\\"|[^"])*"', String), + # keywords + (r'(def|class|try|catch|finally|retry|return|return_local|match|' + r'case|->|=>)\b', Keyword), + # constants + (r'(self|super|nil|false|true)\b', Name.Constant), + (r'[(){};,/?|:\\]', Punctuation), + # names + (words(( + 'Object', 'Array', 'Hash', 'Directory', 'File', 'Class', 'String', + 'Number', 'Enumerable', 'FancyEnumerable', 'Block', 'TrueClass', + 'NilClass', 'FalseClass', 'Tuple', 'Symbol', 'Stack', 'Set', + 'FancySpec', 'Method', 'Package', 'Range'), suffix=r'\b'), + Name.Builtin), + # functions + (r'[a-zA-Z](\w|[-+?!=*/^><%])*:', Name.Function), + # operators, must be below functions + (r'[-+*/~,<>=&!?%^\[\].$]+', Operator), + (r'[A-Z]\w*', Name.Constant), + (r'@[a-zA-Z_]\w*', Name.Variable.Instance), + (r'@@[a-zA-Z_]\w*', Name.Variable.Class), + ('@@?', Operator), + (r'[a-zA-Z_]\w*', Name), + # numbers - / checks are necessary to avoid mismarking regexes, + 
# see comment in RubyLexer + (r'(0[oO]?[0-7]+(?:_[0-7]+)*)(\s*)([/?])?', + bygroups(Number.Oct, Text, Operator)), + (r'(0[xX][0-9A-Fa-f]+(?:_[0-9A-Fa-f]+)*)(\s*)([/?])?', + bygroups(Number.Hex, Text, Operator)), + (r'(0[bB][01]+(?:_[01]+)*)(\s*)([/?])?', + bygroups(Number.Bin, Text, Operator)), + (r'([\d]+(?:_\d+)*)(\s*)([/?])?', + bygroups(Number.Integer, Text, Operator)), + (r'\d+([eE][+-]?[0-9]+)|\d+\.\d+([eE][+-]?[0-9]+)?', Number.Float), + (r'\d+', Number.Integer) + ] + } diff --git a/pygments/lexers/rust.py b/pygments/lexers/rust.py old mode 100644 new mode 100755 index 77884b4..8996784 --- a/pygments/lexers/rust.py +++ b/pygments/lexers/rust.py @@ -1,211 +1,216 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.rust - ~~~~~~~~~~~~~~~~~~~~ - - Lexers for the Rust language. - - :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -from pygments.lexer import RegexLexer, include, bygroups, words, default -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ - Number, Punctuation, Whitespace - -__all__ = ['RustLexer'] - - -class RustLexer(RegexLexer): - """ - Lexer for the Rust programming language (version 1.40). - - .. versionadded:: 1.6 - """ - name = 'Rust' - filenames = ['*.rs', '*.rs.in'] - aliases = ['rust', 'rs'] - mimetypes = ['text/rust', 'text/x-rust'] - - keyword_types = (words(( - 'u8', 'u16', 'u32', 'u64', 'u128', 'i8', 'i16', 'i32', 'i64', 'i128', - 'usize', 'isize', 'f32', 'f64', 'char', 'str', 'bool', - ), suffix=r'\b'), Keyword.Type) - - builtin_types = (words(( - 'Send', 'Sized', 'Sync', 'Unpin', - 'Drop', 'Fn', 'FnMut', 'FnOnce', - 'AsRef', 'AsMut', 'Into', 'From', - 'Iterator', 'Extend', 'IntoIterator', 'DoubleEndedIterator', - 'ExactSizeIterator', 'Option', 'Result', - 'Box', 'ToOwned', 'String', 'ToString', 'Vec', - 'Clone', 'Copy', 'Default', 'Eq', 'Hash', 'Ord', 'PartialEq', - 'PartialOrd', 'Eq', 'Ord', - ), suffix=r'\b'), Name.Builtin) - - builtin_funcs_macros = (words(( - 'drop', 'Some', 'None', 'Ok', 'Err', - 'asm!', 'assert!', 'assert_eq!', 'assert_ne!', 'cfg!', 'column!', - 'compile_error!', 'concat!', 'concat_idents!', 'dbg!', 'debug_assert!', - 'debug_assert_eq!', 'debug_assert_ne!', 'env!', 'eprint!', 'eprintln!', - 'file!', 'format_args!', 'format_args_nl!', 'global_asm!', 'include!', - 'include_bytes!', 'include_str!', 'line!', 'log_syntax!', - 'module_path!', 'option_env!', 'panic!', 'print!', 'println!', - 'stringify!', 'thread_local!', 'todo!', 'trace_macros!', - 'unimplemented!', 'unreachable!', 'vec!', 'write!', 'writeln!', - ), suffix=r'\b'), Name.Builtin) - - tokens = { - 'root': [ - # rust allows a file to start with a shebang, but if the first line - # starts with #![ then it's not a shebang but a crate attribute. 
- (r'#![^[\r\n].*$', Comment.Preproc), - default('base'), - ], - 'base': [ - # Whitespace and Comments - (r'\n', Whitespace), - (r'\s+', Whitespace), - (r'//!.*?\n', String.Doc), - (r'///(\n|[^/].*?\n)', String.Doc), - (r'//(.*?)\n', Comment.Single), - (r'/\*\*(\n|[^/*])', String.Doc, 'doccomment'), - (r'/\*!', String.Doc, 'doccomment'), - (r'/\*', Comment.Multiline, 'comment'), - - # Macro parameters - (r"""\$([a-zA-Z_]\w*|\(,?|\),?|,?)""", Comment.Preproc), - # Keywords - (words(( - 'as', 'async', 'await', 'box', 'const', 'crate', 'dyn', 'else', - 'extern', 'for', 'if', 'impl', 'in', 'loop', 'match', 'move', - 'mut', 'pub', 'ref', 'return', 'static', 'super', 'trait', - 'try', 'unsafe', 'use', 'where', 'while', 'macro_rules!', - ), suffix=r'\b'), Keyword), - (words(('abstract', 'alignof', 'become', 'do', 'final', 'macro', - 'offsetof', 'override', 'priv', 'proc', 'pure', 'sizeof', - 'typeof', 'unsized', 'virtual', 'yield'), suffix=r'\b'), - Keyword.Reserved), - (r'(true|false)\b', Keyword.Constant), - (r'mod\b', Keyword, 'modname'), - (r'let\b', Keyword.Declaration), - (r'fn\b', Keyword, 'funcname'), - (r'(struct|enum|type|union)\b', Keyword, 'typename'), - (r'(default)(\s+)(type|fn)\b', bygroups(Keyword, Text, Keyword)), - keyword_types, - (r'[sS]elf\b', Name.Builtin.Pseudo), - # Prelude (taken from Rust's src/libstd/prelude.rs) - builtin_types, - builtin_funcs_macros, - # Path seperators, so types don't catch them. - (r'::\b', Text), - # Types in positions. - (r'(?::|->)', Text, 'typename'), - # Labels - (r'(break|continue)(\s*)(\'[A-Za-z_]\w*)?', - bygroups(Keyword, Text.Whitespace, Name.Label)), - - # Character literals - (r"""'(\\['"\\nrt]|\\x[0-7][0-9a-fA-F]|\\0""" - r"""|\\u\{[0-9a-fA-F]{1,6}\}|.)'""", - String.Char), - (r"""b'(\\['"\\nrt]|\\x[0-9a-fA-F]{2}|\\0""" - r"""|\\u\{[0-9a-fA-F]{1,6}\}|.)'""", - String.Char), - - # Binary literals - (r'0b[01_]+', Number.Bin, 'number_lit'), - # Octal literals - (r'0o[0-7_]+', Number.Oct, 'number_lit'), - # Hexadecimal literals - (r'0[xX][0-9a-fA-F_]+', Number.Hex, 'number_lit'), - # Decimal literals - (r'[0-9][0-9_]*(\.[0-9_]+[eE][+\-]?[0-9_]+|' - r'\.[0-9_]*(?!\.)|[eE][+\-]?[0-9_]+)', Number.Float, - 'number_lit'), - (r'[0-9][0-9_]*', Number.Integer, 'number_lit'), - - # String literals - (r'b"', String, 'bytestring'), - (r'"', String, 'string'), - (r'b?r(#*)".*?"\1', String), - - # Lifetime names - (r"'(static|_)", Name.Builtin), - (r"'[a-zA-Z_]\w*", Name.Attribute), - - # Operators and Punctuation - (r'\.\.=?', Operator), - (r'[{}()\[\],.;]', Punctuation), - (r'[+\-*/%&|<>^!~@=:?]', Operator), - - # Identifiers - (r'[a-zA-Z_]\w*', Name), - # Raw identifiers - (r'r#[a-zA-Z_]\w*', Name), - - # Attributes - (r'#!?\[', Comment.Preproc, 'attribute['), - ], - 'comment': [ - (r'[^*/]+', Comment.Multiline), - (r'/\*', Comment.Multiline, '#push'), - (r'\*/', Comment.Multiline, '#pop'), - (r'[*/]', Comment.Multiline), - ], - 'doccomment': [ - (r'[^*/]+', String.Doc), - (r'/\*', String.Doc, '#push'), - (r'\*/', String.Doc, '#pop'), - (r'[*/]', String.Doc), - ], - 'modname': [ - (r'\s+', Text), - (r'[a-zA-Z_]\w*', Name.Namespace, '#pop'), - default('#pop'), - ], - 'funcname': [ - (r'\s+', Text), - (r'[a-zA-Z_]\w*', Name.Function, '#pop'), - default('#pop'), - ], - 'typename': [ - (r'\s+', Text), - (r'&', Keyword.Pseudo), - builtin_types, - keyword_types, - (r'[a-zA-Z_]\w*', Name.Class, '#pop'), - default('#pop'), - ], - 'number_lit': [ - (r'[ui](8|16|32|64|size)', Keyword, '#pop'), - (r'f(32|64)', Keyword, '#pop'), - default('#pop'), - ], - 
'string': [ - (r'"', String, '#pop'), - (r"""\\['"\\nrt]|\\x[0-7][0-9a-fA-F]|\\0""" - r"""|\\u\{[0-9a-fA-F]{1,6}\}""", String.Escape), - (r'[^\\"]+', String), - (r'\\', String), - ], - 'bytestring': [ - (r"""\\x[89a-fA-F][0-9a-fA-F]""", String.Escape), - include('string'), - ], - 'attribute_common': [ - (r'"', String, 'string'), - (r'\[', Comment.Preproc, 'attribute['), - (r'\(', Comment.Preproc, 'attribute('), - ], - 'attribute[': [ - include('attribute_common'), - (r'\];?', Comment.Preproc, '#pop'), - (r'[^"\]]+', Comment.Preproc), - ], - 'attribute(': [ - include('attribute_common'), - (r'\);?', Comment.Preproc, '#pop'), - (r'[^")]+', Comment.Preproc), - ], - } +# -*- coding: utf-8 -*- +""" + pygments.lexers.rust + ~~~~~~~~~~~~~~~~~~~~ + + Lexers for the Rust language. + + :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer, include, bygroups, words, default +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Whitespace + +__all__ = ['RustLexer'] + + +class RustLexer(RegexLexer): + """ + Lexer for the Rust programming language (version 1.40). + + .. versionadded:: 1.6 + """ + name = 'Rust' + filenames = ['*.rs', '*.rs.in'] + aliases = ['rust', 'rs'] + mimetypes = ['text/rust', 'text/x-rust'] + + keyword_types = (words(( + 'u8', 'u16', 'u32', 'u64', 'u128', 'i8', 'i16', 'i32', 'i64', 'i128', + 'usize', 'isize', 'f32', 'f64', 'char', 'str', 'bool', + ), suffix=r'\b'), Keyword.Type) + + builtin_types = (words(( + 'Send', 'Sized', 'Sync', 'Unpin', + 'Drop', 'Fn', 'FnMut', 'FnOnce', + 'AsRef', 'AsMut', 'Into', 'From', + 'Iterator', 'Extend', 'IntoIterator', 'DoubleEndedIterator', + 'ExactSizeIterator', 'Option', 'Result', + 'Box', 'ToOwned', 'String', 'ToString', 'Vec', + 'Clone', 'Copy', 'Default', 'Eq', 'Hash', 'Ord', 'PartialEq', + 'PartialOrd', 'Ord', + ), suffix=r'\b'), Name.Builtin) + + builtin_funcs_macros = (words(( + 'drop', 'Some', 'None', 'Ok', 'Err', + 'asm!', 'assert!', 'assert_eq!', 'assert_ne!', 'cfg!', 'column!', + 'compile_error!', 'concat!', 'concat_idents!', 'dbg!', 'debug_assert!', + 'debug_assert_eq!', 'debug_assert_ne!', 'env!', 'eprint!', 'eprintln!', + 'file!', 'format_args!', 'format_args_nl!', 'global_asm!', 'include!', + 'include_bytes!', 'include_str!', 'line!', 'log_syntax!', + 'module_path!', 'option_env!', 'panic!', 'print!', 'println!', + 'stringify!', 'thread_local!', 'todo!', 'trace_macros!', + 'unimplemented!', 'unreachable!', 'vec!', 'write!', 'writeln!', + ), suffix=r'\b'), Name.Builtin) + + tokens = { + 'root': [ + # rust allows a file to start with a shebang, but if the first line + # starts with #![ then it's not a shebang but a crate attribute. 
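+            # Illustrative note (editorial addition): on the first line,
+            #     #!/usr/bin/env rust-script
+            # is a shebang, while
+            #     #![allow(unused)]
+            # is an inner crate attribute. The [^[\r\n] in the rule below
+            # rejects a '[' right after "#!", so only real shebangs are
+            # tokenized as Comment.Preproc; attributes fall through to 'base'.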
+ (r'#![^[\r\n].*$', Comment.Preproc), + default('base'), + ], + 'base': [ + # Whitespace and Comments + (r'\n', Whitespace), + (r'\s+', Whitespace), + (r'//!.*?\n', String.Doc), + (r'///(\n|[^/].*?\n)', String.Doc), + (r'//(.*?)\n', Comment.Single), + (r'/\*\*(\n|[^/*])', String.Doc, 'doccomment'), + (r'/\*!', String.Doc, 'doccomment'), + (r'/\*', Comment.Multiline, 'comment'), + + # Macro parameters + (r"""\$([a-zA-Z_]\w*|\(,?|\),?|,?)""", Comment.Preproc), + # Keywords + (words(( + 'as', 'async', 'await', 'box', 'const', 'crate', 'dyn', 'else', + 'extern', 'for', 'if', 'impl', 'in', 'loop', 'match', 'move', + 'mut', 'pub', 'ref', 'return', 'static', 'super', 'trait', + 'try', 'unsafe', 'use', 'where', 'while', 'macro_rules!', + ), suffix=r'\b'), Keyword), + (words(('abstract', 'alignof', 'become', 'do', 'final', 'macro', + 'offsetof', 'override', 'priv', 'proc', 'pure', 'sizeof', + 'typeof', 'unsized', 'virtual', 'yield'), suffix=r'\b'), + Keyword.Reserved), + (r'(true|false)\b', Keyword.Constant), + (r'mod\b', Keyword, 'modname'), + (r'let\b', Keyword.Declaration), + (r'fn\b', Keyword, 'funcname'), + (r'(struct|enum|type|union)\b', Keyword, 'typename'), + (r'(default)(\s+)(type|fn)\b', bygroups(Keyword, Text, Keyword)), + keyword_types, + (r'[sS]elf\b', Name.Builtin.Pseudo), + # Prelude (taken from Rust's src/libstd/prelude.rs) + builtin_types, + builtin_funcs_macros, + # Path seperators, so types don't catch them. + (r'::\b', Text), + # Types in positions. + (r'(?::|->)', Text, 'typename'), + # Labels + (r'(break|continue)(\s*)(\'[A-Za-z_]\w*)?', + bygroups(Keyword, Text.Whitespace, Name.Label)), + + # Character literals + (r"""'(\\['"\\nrt]|\\x[0-7][0-9a-fA-F]|\\0""" + r"""|\\u\{[0-9a-fA-F]{1,6}\}|.)'""", + String.Char), + (r"""b'(\\['"\\nrt]|\\x[0-9a-fA-F]{2}|\\0""" + r"""|\\u\{[0-9a-fA-F]{1,6}\}|.)'""", + String.Char), + + # Binary literals + (r'0b[01_]+', Number.Bin, 'number_lit'), + # Octal literals + (r'0o[0-7_]+', Number.Oct, 'number_lit'), + # Hexadecimal literals + (r'0[xX][0-9a-fA-F_]+', Number.Hex, 'number_lit'), + # Decimal literals + (r'[0-9][0-9_]*(\.[0-9_]+[eE][+\-]?[0-9_]+|' + r'\.[0-9_]*(?!\.)|[eE][+\-]?[0-9_]+)', Number.Float, + 'number_lit'), + (r'[0-9][0-9_]*', Number.Integer, 'number_lit'), + + # String literals + (r'b"', String, 'bytestring'), + (r'"', String, 'string'), + (r'b?r(#*)".*?"\1', String), + + # Lifetime names + (r"'", Operator, 'lifetime'), + + # Operators and Punctuation + (r'\.\.=?', Operator), + (r'[{}()\[\],.;]', Punctuation), + (r'[+\-*/%&|<>^!~@=:?]', Operator), + + # Identifiers + (r'[a-zA-Z_]\w*', Name), + # Raw identifiers + (r'r#[a-zA-Z_]\w*', Name), + + # Attributes + (r'#!?\[', Comment.Preproc, 'attribute['), + ], + 'comment': [ + (r'[^*/]+', Comment.Multiline), + (r'/\*', Comment.Multiline, '#push'), + (r'\*/', Comment.Multiline, '#pop'), + (r'[*/]', Comment.Multiline), + ], + 'doccomment': [ + (r'[^*/]+', String.Doc), + (r'/\*', String.Doc, '#push'), + (r'\*/', String.Doc, '#pop'), + (r'[*/]', String.Doc), + ], + 'modname': [ + (r'\s+', Text), + (r'[a-zA-Z_]\w*', Name.Namespace, '#pop'), + default('#pop'), + ], + 'funcname': [ + (r'\s+', Text), + (r'[a-zA-Z_]\w*', Name.Function, '#pop'), + default('#pop'), + ], + 'typename': [ + (r'\s+', Text), + (r'&', Keyword.Pseudo), + (r"'", Operator, 'lifetime'), + builtin_types, + keyword_types, + (r'[a-zA-Z_]\w*', Name.Class, '#pop'), + default('#pop'), + ], + 'lifetime': [ + (r"(static|_)", Name.Builtin), + (r"[a-zA-Z_]+\w*", Name.Attribute), + default('#pop'), + ], + 'number_lit': [ + 
(r'[ui](8|16|32|64|size)', Keyword, '#pop'), + (r'f(32|64)', Keyword, '#pop'), + default('#pop'), + ], + 'string': [ + (r'"', String, '#pop'), + (r"""\\['"\\nrt]|\\x[0-7][0-9a-fA-F]|\\0""" + r"""|\\u\{[0-9a-fA-F]{1,6}\}""", String.Escape), + (r'[^\\"]+', String), + (r'\\', String), + ], + 'bytestring': [ + (r"""\\x[89a-fA-F][0-9a-fA-F]""", String.Escape), + include('string'), + ], + 'attribute_common': [ + (r'"', String, 'string'), + (r'\[', Comment.Preproc, 'attribute['), + (r'\(', Comment.Preproc, 'attribute('), + ], + 'attribute[': [ + include('attribute_common'), + (r'\];?', Comment.Preproc, '#pop'), + (r'[^"\]]+', Comment.Preproc), + ], + 'attribute(': [ + include('attribute_common'), + (r'\);?', Comment.Preproc, '#pop'), + (r'[^")]+', Comment.Preproc), + ], + } diff --git a/pygments/lexers/sas.py b/pygments/lexers/sas.py old mode 100644 new mode 100755 index 38d8077..1a5903b --- a/pygments/lexers/sas.py +++ b/pygments/lexers/sas.py @@ -1,228 +1,228 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.sas - ~~~~~~~~~~~~~~~~~~~ - - Lexer for SAS. - - :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -import re -from pygments.lexer import RegexLexer, include, words -from pygments.token import Comment, Keyword, Name, Number, String, Text, \ - Other, Generic - -__all__ = ['SASLexer'] - - -class SASLexer(RegexLexer): - """ - For `SAS `_ files. - - .. versionadded:: 2.2 - """ - # Syntax from syntax/sas.vim by James Kidd - - name = 'SAS' - aliases = ['sas'] - filenames = ['*.SAS', '*.sas'] - mimetypes = ['text/x-sas', 'text/sas', 'application/x-sas'] - flags = re.IGNORECASE | re.MULTILINE - - builtins_macros = ( - "bquote", "nrbquote", "cmpres", "qcmpres", "compstor", "datatyp", - "display", "do", "else", "end", "eval", "global", "goto", "if", - "index", "input", "keydef", "label", "left", "length", "let", - "local", "lowcase", "macro", "mend", "nrquote", - "nrstr", "put", "qleft", "qlowcase", "qscan", - "qsubstr", "qsysfunc", "qtrim", "quote", "qupcase", "scan", - "str", "substr", "superq", "syscall", "sysevalf", "sysexec", - "sysfunc", "sysget", "syslput", "sysprod", "sysrc", "sysrput", - "then", "to", "trim", "unquote", "until", "upcase", "verify", - "while", "window" - ) - - builtins_conditionals = ( - "do", "if", "then", "else", "end", "until", "while" - ) - - builtins_statements = ( - "abort", "array", "attrib", "by", "call", "cards", "cards4", - "catname", "continue", "datalines", "datalines4", "delete", "delim", - "delimiter", "display", "dm", "drop", "endsas", "error", "file", - "filename", "footnote", "format", "goto", "in", "infile", "informat", - "input", "keep", "label", "leave", "length", "libname", "link", - "list", "lostcard", "merge", "missing", "modify", "options", "output", - "out", "page", "put", "redirect", "remove", "rename", "replace", - "retain", "return", "select", "set", "skip", "startsas", "stop", - "title", "update", "waitsas", "where", "window", "x", "systask" - ) - - builtins_sql = ( - "add", "and", "alter", "as", "cascade", "check", "create", - "delete", "describe", "distinct", "drop", "foreign", "from", - "group", "having", "index", "insert", "into", "in", "key", "like", - "message", "modify", "msgtype", "not", "null", "on", "or", - "order", "primary", "references", "reset", "restrict", "select", - "set", "table", "unique", "update", "validate", "view", "where" - ) - - builtins_functions = ( - "abs", "addr", "airy", "arcos", "arsin", "atan", "attrc", - "attrn", "band", "betainv", "blshift", 
"bnot", "bor", - "brshift", "bxor", "byte", "cdf", "ceil", "cexist", "cinv", - "close", "cnonct", "collate", "compbl", "compound", - "compress", "cos", "cosh", "css", "curobs", "cv", "daccdb", - "daccdbsl", "daccsl", "daccsyd", "dacctab", "dairy", "date", - "datejul", "datepart", "datetime", "day", "dclose", "depdb", - "depdbsl", "depsl", "depsyd", - "deptab", "dequote", "dhms", "dif", "digamma", - "dim", "dinfo", "dnum", "dopen", "doptname", "doptnum", - "dread", "dropnote", "dsname", "erf", "erfc", "exist", "exp", - "fappend", "fclose", "fcol", "fdelete", "fetch", "fetchobs", - "fexist", "fget", "fileexist", "filename", "fileref", - "finfo", "finv", "fipname", "fipnamel", "fipstate", "floor", - "fnonct", "fnote", "fopen", "foptname", "foptnum", "fpoint", - "fpos", "fput", "fread", "frewind", "frlen", "fsep", "fuzz", - "fwrite", "gaminv", "gamma", "getoption", "getvarc", "getvarn", - "hbound", "hms", "hosthelp", "hour", "ibessel", "index", - "indexc", "indexw", "input", "inputc", "inputn", "int", - "intck", "intnx", "intrr", "irr", "jbessel", "juldate", - "kurtosis", "lag", "lbound", "left", "length", "lgamma", - "libname", "libref", "log", "log10", "log2", "logpdf", "logpmf", - "logsdf", "lowcase", "max", "mdy", "mean", "min", "minute", - "mod", "month", "mopen", "mort", "n", "netpv", "nmiss", - "normal", "note", "npv", "open", "ordinal", "pathname", - "pdf", "peek", "peekc", "pmf", "point", "poisson", "poke", - "probbeta", "probbnml", "probchi", "probf", "probgam", - "probhypr", "probit", "probnegb", "probnorm", "probt", - "put", "putc", "putn", "qtr", "quote", "ranbin", "rancau", - "ranexp", "rangam", "range", "rank", "rannor", "ranpoi", - "rantbl", "rantri", "ranuni", "repeat", "resolve", "reverse", - "rewind", "right", "round", "saving", "scan", "sdf", "second", - "sign", "sin", "sinh", "skewness", "soundex", "spedis", - "sqrt", "std", "stderr", "stfips", "stname", "stnamel", - "substr", "sum", "symget", "sysget", "sysmsg", "sysprod", - "sysrc", "system", "tan", "tanh", "time", "timepart", "tinv", - "tnonct", "today", "translate", "tranwrd", "trigamma", - "trim", "trimn", "trunc", "uniform", "upcase", "uss", "var", - "varfmt", "varinfmt", "varlabel", "varlen", "varname", - "varnum", "varray", "varrayx", "vartype", "verify", "vformat", - "vformatd", "vformatdx", "vformatn", "vformatnx", "vformatw", - "vformatwx", "vformatx", "vinarray", "vinarrayx", "vinformat", - "vinformatd", "vinformatdx", "vinformatn", "vinformatnx", - "vinformatw", "vinformatwx", "vinformatx", "vlabel", - "vlabelx", "vlength", "vlengthx", "vname", "vnamex", "vtype", - "vtypex", "weekday", "year", "yyq", "zipfips", "zipname", - "zipnamel", "zipstate" - ) - - tokens = { - 'root': [ - include('comments'), - include('proc-data'), - include('cards-datalines'), - include('logs'), - include('general'), - (r'.', Text), - ], - # SAS is multi-line regardless, but * is ended by ; - 'comments': [ - (r'^\s*\*.*?;', Comment), - (r'/\*.*?\*/', Comment), - (r'^\s*\*(.|\n)*?;', Comment.Multiline), - (r'/[*](.|\n)*?[*]/', Comment.Multiline), - ], - # Special highlight for proc, data, quit, run - 'proc-data': [ - (r'(^|;)\s*(proc \w+|data|run|quit)[\s;]', - Keyword.Reserved), - ], - # Special highlight cards and datalines - 'cards-datalines': [ - (r'^\s*(datalines|cards)\s*;\s*$', Keyword, 'data'), - ], - 'data': [ - (r'(.|\n)*^\s*;\s*$', Other, '#pop'), - ], - # Special highlight for put NOTE|ERROR|WARNING (order matters) - 'logs': [ - (r'\n?^\s*%?put ', Keyword, 'log-messages'), - ], - 'log-messages': [ - (r'NOTE(:|-).*', 
Generic, '#pop'), - (r'WARNING(:|-).*', Generic.Emph, '#pop'), - (r'ERROR(:|-).*', Generic.Error, '#pop'), - include('general'), - ], - 'general': [ - include('keywords'), - include('vars-strings'), - include('special'), - include('numbers'), - ], - # Keywords, statements, functions, macros - 'keywords': [ - (words(builtins_statements, - prefix = r'\b', - suffix = r'\b'), - Keyword), - (words(builtins_sql, - prefix = r'\b', - suffix = r'\b'), - Keyword), - (words(builtins_conditionals, - prefix = r'\b', - suffix = r'\b'), - Keyword), - (words(builtins_macros, - prefix = r'%', - suffix = r'\b'), - Name.Builtin), - (words(builtins_functions, - prefix = r'\b', - suffix = r'\('), - Name.Builtin), - ], - # Strings and user-defined variables and macros (order matters) - 'vars-strings': [ - (r'&[a-z_]\w{0,31}\.?', Name.Variable), - (r'%[a-z_]\w{0,31}', Name.Function), - (r'\'', String, 'string_squote'), - (r'"', String, 'string_dquote'), - ], - 'string_squote': [ - ('\'', String, '#pop'), - (r'\\\\|\\"|\\\n', String.Escape), - # AFAIK, macro variables are not evaluated in single quotes - # (r'&', Name.Variable, 'validvar'), - (r'[^$\'\\]+', String), - (r'[$\'\\]', String), - ], - 'string_dquote': [ - (r'"', String, '#pop'), - (r'\\\\|\\"|\\\n', String.Escape), - (r'&', Name.Variable, 'validvar'), - (r'[^$&"\\]+', String), - (r'[$"\\]', String), - ], - 'validvar': [ - (r'[a-z_]\w{0,31}\.?', Name.Variable, '#pop'), - ], - # SAS numbers and special variables - 'numbers': [ - (r'\b[+-]?([0-9]+(\.[0-9]+)?|\.[0-9]+|\.)(E[+-]?[0-9]+)?i?\b', - Number), - ], - 'special': [ - (r'(null|missing|_all_|_automatic_|_character_|_n_|' - r'_infile_|_name_|_null_|_numeric_|_user_|_webout_)', - Keyword.Constant), - ], - # 'operators': [ - # (r'(-|=|<=|>=|<|>|<>|&|!=|' - # r'\||\*|\+|\^|/|!|~|~=)', Operator) - # ], - } +# -*- coding: utf-8 -*- +""" + pygments.lexers.sas + ~~~~~~~~~~~~~~~~~~~ + + Lexer for SAS. + + :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re +from pygments.lexer import RegexLexer, include, words +from pygments.token import Comment, Keyword, Name, Number, String, Text, \ + Other, Generic + +__all__ = ['SASLexer'] + + +class SASLexer(RegexLexer): + """ + For `SAS `_ files. + + .. 
versionadded:: 2.2 + """ + # Syntax from syntax/sas.vim by James Kidd + + name = 'SAS' + aliases = ['sas'] + filenames = ['*.SAS', '*.sas'] + mimetypes = ['text/x-sas', 'text/sas', 'application/x-sas'] + flags = re.IGNORECASE | re.MULTILINE + + builtins_macros = ( + "bquote", "nrbquote", "cmpres", "qcmpres", "compstor", "datatyp", + "display", "do", "else", "end", "eval", "global", "goto", "if", + "index", "input", "keydef", "label", "left", "length", "let", + "local", "lowcase", "macro", "mend", "nrquote", + "nrstr", "put", "qleft", "qlowcase", "qscan", + "qsubstr", "qsysfunc", "qtrim", "quote", "qupcase", "scan", + "str", "substr", "superq", "syscall", "sysevalf", "sysexec", + "sysfunc", "sysget", "syslput", "sysprod", "sysrc", "sysrput", + "then", "to", "trim", "unquote", "until", "upcase", "verify", + "while", "window" + ) + + builtins_conditionals = ( + "do", "if", "then", "else", "end", "until", "while" + ) + + builtins_statements = ( + "abort", "array", "attrib", "by", "call", "cards", "cards4", + "catname", "continue", "datalines", "datalines4", "delete", "delim", + "delimiter", "display", "dm", "drop", "endsas", "error", "file", + "filename", "footnote", "format", "goto", "in", "infile", "informat", + "input", "keep", "label", "leave", "length", "libname", "link", + "list", "lostcard", "merge", "missing", "modify", "options", "output", + "out", "page", "put", "redirect", "remove", "rename", "replace", + "retain", "return", "select", "set", "skip", "startsas", "stop", + "title", "update", "waitsas", "where", "window", "x", "systask" + ) + + builtins_sql = ( + "add", "and", "alter", "as", "cascade", "check", "create", + "delete", "describe", "distinct", "drop", "foreign", "from", + "group", "having", "index", "insert", "into", "in", "key", "like", + "message", "modify", "msgtype", "not", "null", "on", "or", + "order", "primary", "references", "reset", "restrict", "select", + "set", "table", "unique", "update", "validate", "view", "where" + ) + + builtins_functions = ( + "abs", "addr", "airy", "arcos", "arsin", "atan", "attrc", + "attrn", "band", "betainv", "blshift", "bnot", "bor", + "brshift", "bxor", "byte", "cdf", "ceil", "cexist", "cinv", + "close", "cnonct", "collate", "compbl", "compound", + "compress", "cos", "cosh", "css", "curobs", "cv", "daccdb", + "daccdbsl", "daccsl", "daccsyd", "dacctab", "dairy", "date", + "datejul", "datepart", "datetime", "day", "dclose", "depdb", + "depdbsl", "depsl", "depsyd", + "deptab", "dequote", "dhms", "dif", "digamma", + "dim", "dinfo", "dnum", "dopen", "doptname", "doptnum", + "dread", "dropnote", "dsname", "erf", "erfc", "exist", "exp", + "fappend", "fclose", "fcol", "fdelete", "fetch", "fetchobs", + "fexist", "fget", "fileexist", "filename", "fileref", + "finfo", "finv", "fipname", "fipnamel", "fipstate", "floor", + "fnonct", "fnote", "fopen", "foptname", "foptnum", "fpoint", + "fpos", "fput", "fread", "frewind", "frlen", "fsep", "fuzz", + "fwrite", "gaminv", "gamma", "getoption", "getvarc", "getvarn", + "hbound", "hms", "hosthelp", "hour", "ibessel", "index", + "indexc", "indexw", "input", "inputc", "inputn", "int", + "intck", "intnx", "intrr", "irr", "jbessel", "juldate", + "kurtosis", "lag", "lbound", "left", "length", "lgamma", + "libname", "libref", "log", "log10", "log2", "logpdf", "logpmf", + "logsdf", "lowcase", "max", "mdy", "mean", "min", "minute", + "mod", "month", "mopen", "mort", "n", "netpv", "nmiss", + "normal", "note", "npv", "open", "ordinal", "pathname", + "pdf", "peek", "peekc", "pmf", "point", "poisson", "poke", + 
"probbeta", "probbnml", "probchi", "probf", "probgam", + "probhypr", "probit", "probnegb", "probnorm", "probt", + "put", "putc", "putn", "qtr", "quote", "ranbin", "rancau", + "ranexp", "rangam", "range", "rank", "rannor", "ranpoi", + "rantbl", "rantri", "ranuni", "repeat", "resolve", "reverse", + "rewind", "right", "round", "saving", "scan", "sdf", "second", + "sign", "sin", "sinh", "skewness", "soundex", "spedis", + "sqrt", "std", "stderr", "stfips", "stname", "stnamel", + "substr", "sum", "symget", "sysget", "sysmsg", "sysprod", + "sysrc", "system", "tan", "tanh", "time", "timepart", "tinv", + "tnonct", "today", "translate", "tranwrd", "trigamma", + "trim", "trimn", "trunc", "uniform", "upcase", "uss", "var", + "varfmt", "varinfmt", "varlabel", "varlen", "varname", + "varnum", "varray", "varrayx", "vartype", "verify", "vformat", + "vformatd", "vformatdx", "vformatn", "vformatnx", "vformatw", + "vformatwx", "vformatx", "vinarray", "vinarrayx", "vinformat", + "vinformatd", "vinformatdx", "vinformatn", "vinformatnx", + "vinformatw", "vinformatwx", "vinformatx", "vlabel", + "vlabelx", "vlength", "vlengthx", "vname", "vnamex", "vtype", + "vtypex", "weekday", "year", "yyq", "zipfips", "zipname", + "zipnamel", "zipstate" + ) + + tokens = { + 'root': [ + include('comments'), + include('proc-data'), + include('cards-datalines'), + include('logs'), + include('general'), + (r'.', Text), + ], + # SAS is multi-line regardless, but * is ended by ; + 'comments': [ + (r'^\s*\*.*?;', Comment), + (r'/\*.*?\*/', Comment), + (r'^\s*\*(.|\n)*?;', Comment.Multiline), + (r'/[*](.|\n)*?[*]/', Comment.Multiline), + ], + # Special highlight for proc, data, quit, run + 'proc-data': [ + (r'(^|;)\s*(proc \w+|data|run|quit)[\s;]', + Keyword.Reserved), + ], + # Special highlight cards and datalines + 'cards-datalines': [ + (r'^\s*(datalines|cards)\s*;\s*$', Keyword, 'data'), + ], + 'data': [ + (r'(.|\n)*^\s*;\s*$', Other, '#pop'), + ], + # Special highlight for put NOTE|ERROR|WARNING (order matters) + 'logs': [ + (r'\n?^\s*%?put ', Keyword, 'log-messages'), + ], + 'log-messages': [ + (r'NOTE(:|-).*', Generic, '#pop'), + (r'WARNING(:|-).*', Generic.Emph, '#pop'), + (r'ERROR(:|-).*', Generic.Error, '#pop'), + include('general'), + ], + 'general': [ + include('keywords'), + include('vars-strings'), + include('special'), + include('numbers'), + ], + # Keywords, statements, functions, macros + 'keywords': [ + (words(builtins_statements, + prefix = r'\b', + suffix = r'\b'), + Keyword), + (words(builtins_sql, + prefix = r'\b', + suffix = r'\b'), + Keyword), + (words(builtins_conditionals, + prefix = r'\b', + suffix = r'\b'), + Keyword), + (words(builtins_macros, + prefix = r'%', + suffix = r'\b'), + Name.Builtin), + (words(builtins_functions, + prefix = r'\b', + suffix = r'\('), + Name.Builtin), + ], + # Strings and user-defined variables and macros (order matters) + 'vars-strings': [ + (r'&[a-z_]\w{0,31}\.?', Name.Variable), + (r'%[a-z_]\w{0,31}', Name.Function), + (r'\'', String, 'string_squote'), + (r'"', String, 'string_dquote'), + ], + 'string_squote': [ + ('\'', String, '#pop'), + (r'\\\\|\\"|\\\n', String.Escape), + # AFAIK, macro variables are not evaluated in single quotes + # (r'&', Name.Variable, 'validvar'), + (r'[^$\'\\]+', String), + (r'[$\'\\]', String), + ], + 'string_dquote': [ + (r'"', String, '#pop'), + (r'\\\\|\\"|\\\n', String.Escape), + (r'&', Name.Variable, 'validvar'), + (r'[^$&"\\]+', String), + (r'[$"\\]', String), + ], + 'validvar': [ + (r'[a-z_]\w{0,31}\.?', Name.Variable, '#pop'), + ], + # SAS 
numbers and special variables + 'numbers': [ + (r'\b[+-]?([0-9]+(\.[0-9]+)?|\.[0-9]+|\.)(E[+-]?[0-9]+)?i?\b', + Number), + ], + 'special': [ + (r'(null|missing|_all_|_automatic_|_character_|_n_|' + r'_infile_|_name_|_null_|_numeric_|_user_|_webout_)', + Keyword.Constant), + ], + # 'operators': [ + # (r'(-|=|<=|>=|<|>|<>|&|!=|' + # r'\||\*|\+|\^|/|!|~|~=)', Operator) + # ], + } diff --git a/pygments/lexers/scdoc.py b/pygments/lexers/scdoc.py old mode 100644 new mode 100755 index 4916393..eb6ca69 --- a/pygments/lexers/scdoc.py +++ b/pygments/lexers/scdoc.py @@ -1,70 +1,70 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.scdoc - ~~~~~~~~~~~~~~~~~~~~~ - - Lexer for scdoc, a simple man page generator. - - :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -import re - -from pygments.lexer import RegexLexer, include, bygroups, \ - using, this -from pygments.token import Text, Comment, Keyword, String, \ - Generic - - -__all__ = ['ScdocLexer'] - - -class ScdocLexer(RegexLexer): - """ - `scdoc` is a simple man page generator for POSIX systems written in C99. - https://git.sr.ht/~sircmpwn/scdoc - - .. versionadded:: 2.5 - """ - name = 'scdoc' - aliases = ['scdoc', 'scd'] - filenames = ['*.scd', '*.scdoc'] - flags = re.MULTILINE - - tokens = { - 'root': [ - # comment - (r'^(;.+\n)', bygroups(Comment)), - - # heading with pound prefix - (r'^(#)([^#].+\n)', bygroups(Generic.Heading, Text)), - (r'^(#{2})(.+\n)', bygroups(Generic.Subheading, Text)), - # bulleted lists - (r'^(\s*)([*-])(\s)(.+\n)', - bygroups(Text, Keyword, Text, using(this, state='inline'))), - # numbered lists - (r'^(\s*)(\.+\.)( .+\n)', - bygroups(Text, Keyword, using(this, state='inline'))), - # quote - (r'^(\s*>\s)(.+\n)', bygroups(Keyword, Generic.Emph)), - # text block - (r'^(```\n)([\w\W]*?)(^```$)', bygroups(String, Text, String)), - - include('inline'), - ], - 'inline': [ - # escape - (r'\\.', Text), - # underlines - (r'(\s)(_[^_]+_)(\W|\n)', bygroups(Text, Generic.Emph, Text)), - # bold - (r'(\s)(\*[^\*]+\*)(\W|\n)', bygroups(Text, Generic.Strong, Text)), - # inline code - (r'`[^`]+`', String.Backtick), - - # general text, must come last! - (r'[^\\\s]+', Text), - (r'.', Text), - ], - } +# -*- coding: utf-8 -*- +""" + pygments.lexers.scdoc + ~~~~~~~~~~~~~~~~~~~~~ + + Lexer for scdoc, a simple man page generator. + + :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, include, bygroups, \ + using, this +from pygments.token import Text, Comment, Keyword, String, \ + Generic + + +__all__ = ['ScdocLexer'] + + +class ScdocLexer(RegexLexer): + """ + `scdoc` is a simple man page generator for POSIX systems written in C99. + https://git.sr.ht/~sircmpwn/scdoc + + .. 
versionadded:: 2.5 + """ + name = 'scdoc' + aliases = ['scdoc', 'scd'] + filenames = ['*.scd', '*.scdoc'] + flags = re.MULTILINE + + tokens = { + 'root': [ + # comment + (r'^(;.+\n)', bygroups(Comment)), + + # heading with pound prefix + (r'^(#)([^#].+\n)', bygroups(Generic.Heading, Text)), + (r'^(#{2})(.+\n)', bygroups(Generic.Subheading, Text)), + # bulleted lists + (r'^(\s*)([*-])(\s)(.+\n)', + bygroups(Text, Keyword, Text, using(this, state='inline'))), + # numbered lists + (r'^(\s*)(\.+\.)( .+\n)', + bygroups(Text, Keyword, using(this, state='inline'))), + # quote + (r'^(\s*>\s)(.+\n)', bygroups(Keyword, Generic.Emph)), + # text block + (r'^(```\n)([\w\W]*?)(^```$)', bygroups(String, Text, String)), + + include('inline'), + ], + 'inline': [ + # escape + (r'\\.', Text), + # underlines + (r'(\s)(_[^_]+_)(\W|\n)', bygroups(Text, Generic.Emph, Text)), + # bold + (r'(\s)(\*[^*]+\*)(\W|\n)', bygroups(Text, Generic.Strong, Text)), + # inline code + (r'`[^`]+`', String.Backtick), + + # general text, must come last! + (r'[^\\\s]+', Text), + (r'.', Text), + ], + } diff --git a/pygments/lexers/scripting.py b/pygments/lexers/scripting.py old mode 100644 new mode 100755 index a20c54b..1b45e38 --- a/pygments/lexers/scripting.py +++ b/pygments/lexers/scripting.py @@ -1,1275 +1,1275 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.scripting - ~~~~~~~~~~~~~~~~~~~~~~~~~ - - Lexer for scripting and embedded languages. - - :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -import re - -from pygments.lexer import RegexLexer, include, bygroups, default, combined, \ - words -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ - Number, Punctuation, Error, Whitespace, Other -from pygments.util import get_bool_opt, get_list_opt - -__all__ = ['LuaLexer', 'MoonScriptLexer', 'ChaiscriptLexer', 'LSLLexer', - 'AppleScriptLexer', 'RexxLexer', 'MOOCodeLexer', 'HybrisLexer', - 'EasytrieveLexer', 'JclLexer', 'MiniScriptLexer'] - - -class LuaLexer(RegexLexer): - """ - For `Lua `_ source code. - - Additional options accepted: - - `func_name_highlighting` - If given and ``True``, highlight builtin function names - (default: ``True``). - `disabled_modules` - If given, must be a list of module names whose function names - should not be highlighted. By default all modules are highlighted. - - To get a list of allowed modules have a look into the - `_lua_builtins` module: - - .. sourcecode:: pycon - - >>> from pygments.lexers._lua_builtins import MODULES - >>> MODULES.keys() - ['string', 'coroutine', 'modules', 'io', 'basic', ...] - """ - - name = 'Lua' - aliases = ['lua'] - filenames = ['*.lua', '*.wlua'] - mimetypes = ['text/x-lua', 'application/x-lua'] - - _comment_multiline = r'(?:--\[(?P=*)\[[\w\W]*?\](?P=level)\])' - _comment_single = r'(?:--.*$)' - _space = r'(?:\s+)' - _s = r'(?:%s|%s|%s)' % (_comment_multiline, _comment_single, _space) - _name = r'(?:[^\W\d]\w*)' - - tokens = { - 'root': [ - # Lua allows a file to start with a shebang. 
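- # For illustration (a hypothetical input): a file such as
- #   #!/usr/bin/env lua
- #   print("hello")
- # has its first line matched by the rule below as Comment.Preproc,
- # after which default('base') hands everything else to the 'base' state.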
- (r'#!.*', Comment.Preproc), - default('base'), - ], - 'ws': [ - (_comment_multiline, Comment.Multiline), - (_comment_single, Comment.Single), - (_space, Text), - ], - 'base': [ - include('ws'), - - (r'(?i)0x[\da-f]*(\.[\da-f]*)?(p[+-]?\d+)?', Number.Hex), - (r'(?i)(\d*\.\d+|\d+\.\d*)(e[+-]?\d+)?', Number.Float), - (r'(?i)\d+e[+-]?\d+', Number.Float), - (r'\d+', Number.Integer), - - # multiline strings - (r'(?s)\[(=*)\[.*?\]\1\]', String), - - (r'::', Punctuation, 'label'), - (r'\.{3}', Punctuation), - (r'[=<>|~&+\-*/%#^]+|\.\.', Operator), - (r'[\[\]{}().,:;]', Punctuation), - (r'(and|or|not)\b', Operator.Word), - - ('(break|do|else|elseif|end|for|if|in|repeat|return|then|until|' - r'while)\b', Keyword.Reserved), - (r'goto\b', Keyword.Reserved, 'goto'), - (r'(local)\b', Keyword.Declaration), - (r'(true|false|nil)\b', Keyword.Constant), - - (r'(function)\b', Keyword.Reserved, 'funcname'), - - (r'[A-Za-z_]\w*(\.[A-Za-z_]\w*)?', Name), - - ("'", String.Single, combined('stringescape', 'sqs')), - ('"', String.Double, combined('stringescape', 'dqs')) - ], - - 'funcname': [ - include('ws'), - (r'[.:]', Punctuation), - (r'%s(?=%s*[.:])' % (_name, _s), Name.Class), - (_name, Name.Function, '#pop'), - # inline function - (r'\(', Punctuation, '#pop'), - ], - - 'goto': [ - include('ws'), - (_name, Name.Label, '#pop'), - ], - - 'label': [ - include('ws'), - (r'::', Punctuation, '#pop'), - (_name, Name.Label), - ], - - 'stringescape': [ - (r'\\([abfnrtv\\"\']|[\r\n]{1,2}|z\s*|x[0-9a-fA-F]{2}|\d{1,3}|' - r'u\{[0-9a-fA-F]+\})', String.Escape), - ], - - 'sqs': [ - (r"'", String.Single, '#pop'), - (r"[^\\']+", String.Single), - ], - - 'dqs': [ - (r'"', String.Double, '#pop'), - (r'[^\\"]+', String.Double), - ] - } - - def __init__(self, **options): - self.func_name_highlighting = get_bool_opt( - options, 'func_name_highlighting', True) - self.disabled_modules = get_list_opt(options, 'disabled_modules', []) - - self._functions = set() - if self.func_name_highlighting: - from pygments.lexers._lua_builtins import MODULES - for mod, func in MODULES.items(): - if mod not in self.disabled_modules: - self._functions.update(func) - RegexLexer.__init__(self, **options) - - def get_tokens_unprocessed(self, text): - for index, token, value in \ - RegexLexer.get_tokens_unprocessed(self, text): - if token is Name: - if value in self._functions: - yield index, Name.Builtin, value - continue - elif '.' in value: - a, b = value.split('.') - yield index, Name, a - yield index + len(a), Punctuation, u'.' - yield index + len(a) + 1, Name, b - continue - yield index, token, value - -class MoonScriptLexer(LuaLexer): - """ - For `MoonScript `_ source code. - - .. 
versionadded:: 1.5 - """ - - name = "MoonScript" - aliases = ["moon", "moonscript"] - filenames = ["*.moon"] - mimetypes = ['text/x-moonscript', 'application/x-moonscript'] - - tokens = { - 'root': [ - (r'#!(.*?)$', Comment.Preproc), - default('base'), - ], - 'base': [ - ('--.*$', Comment.Single), - (r'(?i)(\d*\.\d+|\d+\.\d*)(e[+-]?\d+)?', Number.Float), - (r'(?i)\d+e[+-]?\d+', Number.Float), - (r'(?i)0x[0-9a-f]*', Number.Hex), - (r'\d+', Number.Integer), - (r'\n', Text), - (r'[^\S\n]+', Text), - (r'(?s)\[(=*)\[.*?\]\1\]', String), - (r'(->|=>)', Name.Function), - (r':[a-zA-Z_]\w*', Name.Variable), - (r'(==|!=|~=|<=|>=|\.\.\.|\.\.|[=+\-*/%^<>#!.\\:])', Operator), - (r'[;,]', Punctuation), - (r'[\[\]{}()]', Keyword.Type), - (r'[a-zA-Z_]\w*:', Name.Variable), - (words(( - 'class', 'extends', 'if', 'then', 'super', 'do', 'with', - 'import', 'export', 'while', 'elseif', 'return', 'for', 'in', - 'from', 'when', 'using', 'else', 'and', 'or', 'not', 'switch', - 'break'), suffix=r'\b'), - Keyword), - (r'(true|false|nil)\b', Keyword.Constant), - (r'(and|or|not)\b', Operator.Word), - (r'(self)\b', Name.Builtin.Pseudo), - (r'@@?([a-zA-Z_]\w*)?', Name.Variable.Class), - (r'[A-Z]\w*', Name.Class), # proper name - (r'[A-Za-z_]\w*(\.[A-Za-z_]\w*)?', Name), - ("'", String.Single, combined('stringescape', 'sqs')), - ('"', String.Double, combined('stringescape', 'dqs')) - ], - 'stringescape': [ - (r'''\\([abfnrtv\\"']|\d{1,3})''', String.Escape) - ], - 'sqs': [ - ("'", String.Single, '#pop'), - (".", String) - ], - 'dqs': [ - ('"', String.Double, '#pop'), - (".", String) - ] - } - - def get_tokens_unprocessed(self, text): - # set . as Operator instead of Punctuation - for index, token, value in LuaLexer.get_tokens_unprocessed(self, text): - if token == Punctuation and value == ".": - token = Operator - yield index, token, value - - -class ChaiscriptLexer(RegexLexer): - """ - For `ChaiScript `_ source code. - - .. versionadded:: 2.0 - """ - - name = 'ChaiScript' - aliases = ['chai', 'chaiscript'] - filenames = ['*.chai'] - mimetypes = ['text/x-chaiscript', 'application/x-chaiscript'] - - flags = re.DOTALL | re.MULTILINE - - tokens = { - 'commentsandwhitespace': [ - (r'\s+', Text), - (r'//.*?\n', Comment.Single), - (r'/\*.*?\*/', Comment.Multiline), - (r'^\#.*?\n', Comment.Single) - ], - 'slashstartsregex': [ - include('commentsandwhitespace'), - (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/' - r'([gim]+\b|\B)', String.Regex, '#pop'), - (r'(?=/)', Text, ('#pop', 'badregex')), - default('#pop') - ], - 'badregex': [ - (r'\n', Text, '#pop') - ], - 'root': [ - include('commentsandwhitespace'), - (r'\n', Text), - (r'[^\S\n]+', Text), - (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|\.\.' 
- r'(<<|>>>?|==?|!=?|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'), - (r'[{(\[;,]', Punctuation, 'slashstartsregex'), - (r'[})\].]', Punctuation), - (r'[=+\-*/]', Operator), - (r'(for|in|while|do|break|return|continue|if|else|' - r'throw|try|catch' - r')\b', Keyword, 'slashstartsregex'), - (r'(var)\b', Keyword.Declaration, 'slashstartsregex'), - (r'(attr|def|fun)\b', Keyword.Reserved), - (r'(true|false)\b', Keyword.Constant), - (r'(eval|throw)\b', Name.Builtin), - (r'`\S+`', Name.Builtin), - (r'[$a-zA-Z_]\w*', Name.Other), - (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), - (r'0x[0-9a-fA-F]+', Number.Hex), - (r'[0-9]+', Number.Integer), - (r'"', String.Double, 'dqstring'), - (r"'(\\\\|\\'|[^'])*'", String.Single), - ], - 'dqstring': [ - (r'\$\{[^"}]+?\}', String.Interpol), - (r'\$', String.Double), - (r'\\\\', String.Double), - (r'\\"', String.Double), - (r'[^\\"$]+', String.Double), - (r'"', String.Double, '#pop'), - ], - } - - -class LSLLexer(RegexLexer): - """ - For Second Life's Linden Scripting Language source code. - - .. versionadded:: 2.0 - """ - - name = 'LSL' - aliases = ['lsl'] - filenames = ['*.lsl'] - mimetypes = ['text/x-lsl'] - - flags = re.MULTILINE - - lsl_keywords = r'\b(?:do|else|for|if|jump|return|while)\b' - lsl_types = r'\b(?:float|integer|key|list|quaternion|rotation|string|vector)\b' - lsl_states = r'\b(?:(?:state)\s+\w+|default)\b' - lsl_events = r'\b(?:state_(?:entry|exit)|touch(?:_(?:start|end))?|(?:land_)?collision(?:_(?:start|end))?|timer|listen|(?:no_)?sensor|control|(?:not_)?at_(?:rot_)?target|money|email|run_time_permissions|changed|attach|dataserver|moving_(?:start|end)|link_message|(?:on|object)_rez|remote_data|http_re(?:sponse|quest)|path_update|transaction_result)\b' - lsl_functions_builtin = r'\b(?:ll(?:ReturnObjectsBy(?:ID|Owner)|Json(?:2List|[GS]etValue|ValueType)|Sin|Cos|Tan|Atan2|Sqrt|Pow|Abs|Fabs|Frand|Floor|Ceil|Round|Vec(?:Mag|Norm|Dist)|Rot(?:Between|2(?:Euler|Fwd|Left|Up))|(?:Euler|Axes)2Rot|Whisper|(?:Region|Owner)?Say|Shout|Listen(?:Control|Remove)?|Sensor(?:Repeat|Remove)?|Detected(?:Name|Key|Owner|Type|Pos|Vel|Grab|Rot|Group|LinkNumber)|Die|Ground|Wind|(?:[GS]et)(?:AnimationOverride|MemoryLimit|PrimMediaParams|ParcelMusicURL|Object(?:Desc|Name)|PhysicsMaterial|Status|Scale|Color|Alpha|Texture|Pos|Rot|Force|Torque)|ResetAnimationOverride|(?:Scale|Offset|Rotate)Texture|(?:Rot)?Target(?:Remove)?|(?:Stop)?MoveToTarget|Apply(?:Rotational)?Impulse|Set(?:KeyframedMotion|ContentType|RegionPos|(?:Angular)?Velocity|Buoyancy|HoverHeight|ForceAndTorque|TimerEvent|ScriptState|Damage|TextureAnim|Sound(?:Queueing|Radius)|Vehicle(?:Type|(?:Float|Vector|Rotation)Param)|(?:Touch|Sit)?Text|Camera(?:Eye|At)Offset|PrimitiveParams|ClickAction|Link(?:Alpha|Color|PrimitiveParams(?:Fast)?|Texture(?:Anim)?|Camera|Media)|RemoteScriptAccessPin|PayPrice|LocalRot)|ScaleByFactor|Get(?:(?:Max|Min)ScaleFactor|ClosestNavPoint|StaticPath|SimStats|Env|PrimitiveParams|Link(?:PrimitiveParams|Number(?:OfSides)?|Key|Name|Media)|HTTPHeader|FreeURLs|Object(?:Details|PermMask|PrimCount)|Parcel(?:MaxPrims|Details|Prim(?:Count|Owners))|Attached|(?:SPMax|Free|Used)Memory|Region(?:Name|TimeDilation|FPS|Corner|AgentCount)|Root(?:Position|Rotation)|UnixTime|(?:Parcel|Region)Flags|(?:Wall|GMT)clock|SimulatorHostname|BoundingBox|GeometricCenter|Creator|NumberOf(?:Prims|NotecardLines|Sides)|Animation(?:List)?|(?:Camera|Local)(?:Pos|Rot)|Vel|Accel|Omega|Time(?:stamp|OfDay)|(?:Object|CenterOf)?Mass|MassMKS|Energy|Owner|(?:Owner)?Key|SunDirection|Texture(?:Offset|Scale|Rot)|Inventory
(?:Number|Name|Key|Type|Creator|PermMask)|Permissions(?:Key)?|StartParameter|List(?:Length|EntryType)|Date|Agent(?:Size|Info|Language|List)|LandOwnerAt|NotecardLine|Script(?:Name|State))|(?:Get|Reset|GetAndReset)Time|PlaySound(?:Slave)?|LoopSound(?:Master|Slave)?|(?:Trigger|Stop|Preload)Sound|(?:(?:Get|Delete)Sub|Insert)String|To(?:Upper|Lower)|Give(?:InventoryList|Money)|RezObject|(?:Stop)?LookAt|Sleep|CollisionFilter|(?:Take|Release)Controls|DetachFromAvatar|AttachToAvatar(?:Temp)?|InstantMessage|(?:GetNext)?Email|StopHover|MinEventDelay|RotLookAt|String(?:Length|Trim)|(?:Start|Stop)Animation|TargetOmega|RequestPermissions|(?:Create|Break)Link|BreakAllLinks|(?:Give|Remove)Inventory|Water|PassTouches|Request(?:Agent|Inventory)Data|TeleportAgent(?:Home|GlobalCoords)?|ModifyLand|CollisionSound|ResetScript|MessageLinked|PushObject|PassCollisions|AxisAngle2Rot|Rot2(?:Axis|Angle)|A(?:cos|sin)|AngleBetween|AllowInventoryDrop|SubStringIndex|List2(?:CSV|Integer|Json|Float|String|Key|Vector|Rot|List(?:Strided)?)|DeleteSubList|List(?:Statistics|Sort|Randomize|(?:Insert|Find|Replace)List)|EdgeOfWorld|AdjustSoundVolume|Key2Name|TriggerSoundLimited|EjectFromLand|(?:CSV|ParseString)2List|OverMyLand|SameGroup|UnSit|Ground(?:Slope|Normal|Contour)|GroundRepel|(?:Set|Remove)VehicleFlags|(?:AvatarOn)?(?:Link)?SitTarget|Script(?:Danger|Profiler)|Dialog|VolumeDetect|ResetOtherScript|RemoteLoadScriptPin|(?:Open|Close)RemoteDataChannel|SendRemoteData|RemoteDataReply|(?:Integer|String)ToBase64|XorBase64|Log(?:10)?|Base64To(?:String|Integer)|ParseStringKeepNulls|RezAtRoot|RequestSimulatorData|ForceMouselook|(?:Load|Release|(?:E|Une)scape)URL|ParcelMedia(?:CommandList|Query)|ModPow|MapDestination|(?:RemoveFrom|AddTo|Reset)Land(?:Pass|Ban)List|(?:Set|Clear)CameraParams|HTTP(?:Request|Response)|TextBox|DetectedTouch(?:UV|Face|Pos|(?:N|Bin)ormal|ST)|(?:MD5|SHA1|DumpList2)String|Request(?:Secure)?URL|Clear(?:Prim|Link)Media|(?:Link)?ParticleSystem|(?:Get|Request)(?:Username|DisplayName)|RegionSayTo|CastRay|GenerateKey|TransferLindenDollars|ManageEstateAccess|(?:Create|Delete)Character|ExecCharacterCmd|Evade|FleeFrom|NavigateTo|PatrolPoints|Pursue|UpdateCharacter|WanderWithin))\b' - lsl_constants_float = r'\b(?:DEG_TO_RAD|PI(?:_BY_TWO)?|RAD_TO_DEG|SQRT2|TWO_PI)\b' - lsl_constants_integer = 
r'\b(?:JSON_APPEND|STATUS_(?:PHYSICS|ROTATE_[XYZ]|PHANTOM|SANDBOX|BLOCK_GRAB(?:_OBJECT)?|(?:DIE|RETURN)_AT_EDGE|CAST_SHADOWS|OK|MALFORMED_PARAMS|TYPE_MISMATCH|BOUNDS_ERROR|NOT_(?:FOUND|SUPPORTED)|INTERNAL_ERROR|WHITELIST_FAILED)|AGENT(?:_(?:BY_(?:LEGACY_|USER)NAME|FLYING|ATTACHMENTS|SCRIPTED|MOUSELOOK|SITTING|ON_OBJECT|AWAY|WALKING|IN_AIR|TYPING|CROUCHING|BUSY|ALWAYS_RUN|AUTOPILOT|LIST_(?:PARCEL(?:_OWNER)?|REGION)))?|CAMERA_(?:PITCH|DISTANCE|BEHINDNESS_(?:ANGLE|LAG)|(?:FOCUS|POSITION)(?:_(?:THRESHOLD|LOCKED|LAG))?|FOCUS_OFFSET|ACTIVE)|ANIM_ON|LOOP|REVERSE|PING_PONG|SMOOTH|ROTATE|SCALE|ALL_SIDES|LINK_(?:ROOT|SET|ALL_(?:OTHERS|CHILDREN)|THIS)|ACTIVE|PASSIVE|SCRIPTED|CONTROL_(?:FWD|BACK|(?:ROT_)?(?:LEFT|RIGHT)|UP|DOWN|(?:ML_)?LBUTTON)|PERMISSION_(?:RETURN_OBJECTS|DEBIT|OVERRIDE_ANIMATIONS|SILENT_ESTATE_MANAGEMENT|TAKE_CONTROLS|TRIGGER_ANIMATION|ATTACH|CHANGE_LINKS|(?:CONTROL|TRACK)_CAMERA|TELEPORT)|INVENTORY_(?:TEXTURE|SOUND|OBJECT|SCRIPT|LANDMARK|CLOTHING|NOTECARD|BODYPART|ANIMATION|GESTURE|ALL|NONE)|CHANGED_(?:INVENTORY|COLOR|SHAPE|SCALE|TEXTURE|LINK|ALLOWED_DROP|OWNER|REGION(?:_START)?|TELEPORT|MEDIA)|OBJECT_(?:(?:PHYSICS|SERVER|STREAMING)_COST|UNKNOWN_DETAIL|CHARACTER_TIME|PHANTOM|PHYSICS|TEMP_ON_REZ|NAME|DESC|POS|PRIM_EQUIVALENCE|RETURN_(?:PARCEL(?:_OWNER)?|REGION)|ROO?T|VELOCITY|OWNER|GROUP|CREATOR|ATTACHED_POINT|RENDER_WEIGHT|PATHFINDING_TYPE|(?:RUNNING|TOTAL)_SCRIPT_COUNT|SCRIPT_(?:MEMORY|TIME))|TYPE_(?:INTEGER|FLOAT|STRING|KEY|VECTOR|ROTATION|INVALID)|(?:DEBUG|PUBLIC)_CHANNEL|ATTACH_(?:AVATAR_CENTER|CHEST|HEAD|BACK|PELVIS|MOUTH|CHIN|NECK|NOSE|BELLY|[LR](?:SHOULDER|HAND|FOOT|EAR|EYE|[UL](?:ARM|LEG)|HIP)|(?:LEFT|RIGHT)_PEC|HUD_(?:CENTER_[12]|TOP_(?:RIGHT|CENTER|LEFT)|BOTTOM(?:_(?:RIGHT|LEFT))?))|LAND_(?:LEVEL|RAISE|LOWER|SMOOTH|NOISE|REVERT)|DATA_(?:ONLINE|NAME|BORN|SIM_(?:POS|STATUS|RATING)|PAYINFO)|PAYMENT_INFO_(?:ON_FILE|USED)|REMOTE_DATA_(?:CHANNEL|REQUEST|REPLY)|PSYS_(?:PART_(?:BF_(?:ZERO|ONE(?:_MINUS_(?:DEST_COLOR|SOURCE_(ALPHA|COLOR)))?|DEST_COLOR|SOURCE_(ALPHA|COLOR))|BLEND_FUNC_(DEST|SOURCE)|FLAGS|(?:START|END)_(?:COLOR|ALPHA|SCALE|GLOW)|MAX_AGE|(?:RIBBON|WIND|INTERP_(?:COLOR|SCALE)|BOUNCE|FOLLOW_(?:SRC|VELOCITY)|TARGET_(?:POS|LINEAR)|EMISSIVE)_MASK)|SRC_(?:MAX_AGE|PATTERN|ANGLE_(?:BEGIN|END)|BURST_(?:RATE|PART_COUNT|RADIUS|SPEED_(?:MIN|MAX))|ACCEL|TEXTURE|TARGET_KEY|OMEGA|PATTERN_(?:DROP|EXPLODE|ANGLE(?:_CONE(?:_EMPTY)?)?)))|VEHICLE_(?:REFERENCE_FRAME|TYPE_(?:NONE|SLED|CAR|BOAT|AIRPLANE|BALLOON)|(?:LINEAR|ANGULAR)_(?:FRICTION_TIMESCALE|MOTOR_DIRECTION)|LINEAR_MOTOR_OFFSET|HOVER_(?:HEIGHT|EFFICIENCY|TIMESCALE)|BUOYANCY|(?:LINEAR|ANGULAR)_(?:DEFLECTION_(?:EFFICIENCY|TIMESCALE)|MOTOR_(?:DECAY_)?TIMESCALE)|VERTICAL_ATTRACTION_(?:EFFICIENCY|TIMESCALE)|BANKING_(?:EFFICIENCY|MIX|TIMESCALE)|FLAG_(?:NO_DEFLECTION_UP|LIMIT_(?:ROLL_ONLY|MOTOR_UP)|HOVER_(?:(?:WATER|TERRAIN|UP)_ONLY|GLOBAL_HEIGHT)|MOUSELOOK_(?:STEER|BANK)|CAMERA_DECOUPLED))|PRIM_(?:TYPE(?:_(?:BOX|CYLINDER|PRISM|SPHERE|TORUS|TUBE|RING|SCULPT))?|HOLE_(?:DEFAULT|CIRCLE|SQUARE|TRIANGLE)|MATERIAL(?:_(?:STONE|METAL|GLASS|WOOD|FLESH|PLASTIC|RUBBER))?|SHINY_(?:NONE|LOW|MEDIUM|HIGH)|BUMP_(?:NONE|BRIGHT|DARK|WOOD|BARK|BRICKS|CHECKER|CONCRETE|TILE|STONE|DISKS|GRAVEL|BLOBS|SIDING|LARGETILE|STUCCO|SUCTION|WEAVE)|TEXGEN_(?:DEFAULT|PLANAR)|SCULPT_(?:TYPE_(?:SPHERE|TORUS|PLANE|CYLINDER|MASK)|FLAG_(?:MIRROR|INVERT))|PHYSICS(?:_(?:SHAPE_(?:CONVEX|NONE|PRIM|TYPE)))?|(?:POS|ROT)_LOCAL|SLICE|TEXT|FLEXIBLE|POINT_LIGHT|TEMP_ON_REZ|PHANTOM|POSITION|SIZE|ROTATION|TEXTURE|NAME|OMEGA|DESC|LINK_TARGET|COLOR|BUMP_SHINY|FULLBRIGHT|TEXGEN|GLOW|MEDIA_(?:
ALT_IMAGE_ENABLE|CONTROLS|(?:CURRENT|HOME)_URL|AUTO_(?:LOOP|PLAY|SCALE|ZOOM)|FIRST_CLICK_INTERACT|(?:WIDTH|HEIGHT)_PIXELS|WHITELIST(?:_ENABLE)?|PERMS_(?:INTERACT|CONTROL)|PARAM_MAX|CONTROLS_(?:STANDARD|MINI)|PERM_(?:NONE|OWNER|GROUP|ANYONE)|MAX_(?:URL_LENGTH|WHITELIST_(?:SIZE|COUNT)|(?:WIDTH|HEIGHT)_PIXELS)))|MASK_(?:BASE|OWNER|GROUP|EVERYONE|NEXT)|PERM_(?:TRANSFER|MODIFY|COPY|MOVE|ALL)|PARCEL_(?:MEDIA_COMMAND_(?:STOP|PAUSE|PLAY|LOOP|TEXTURE|URL|TIME|AGENT|UNLOAD|AUTO_ALIGN|TYPE|SIZE|DESC|LOOP_SET)|FLAG_(?:ALLOW_(?:FLY|(?:GROUP_)?SCRIPTS|LANDMARK|TERRAFORM|DAMAGE|CREATE_(?:GROUP_)?OBJECTS)|USE_(?:ACCESS_(?:GROUP|LIST)|BAN_LIST|LAND_PASS_LIST)|LOCAL_SOUND_ONLY|RESTRICT_PUSHOBJECT|ALLOW_(?:GROUP|ALL)_OBJECT_ENTRY)|COUNT_(?:TOTAL|OWNER|GROUP|OTHER|SELECTED|TEMP)|DETAILS_(?:NAME|DESC|OWNER|GROUP|AREA|ID|SEE_AVATARS))|LIST_STAT_(?:MAX|MIN|MEAN|MEDIAN|STD_DEV|SUM(?:_SQUARES)?|NUM_COUNT|GEOMETRIC_MEAN|RANGE)|PAY_(?:HIDE|DEFAULT)|REGION_FLAG_(?:ALLOW_DAMAGE|FIXED_SUN|BLOCK_TERRAFORM|SANDBOX|DISABLE_(?:COLLISIONS|PHYSICS)|BLOCK_FLY|ALLOW_DIRECT_TELEPORT|RESTRICT_PUSHOBJECT)|HTTP_(?:METHOD|MIMETYPE|BODY_(?:MAXLENGTH|TRUNCATED)|CUSTOM_HEADER|PRAGMA_NO_CACHE|VERBOSE_THROTTLE|VERIFY_CERT)|STRING_(?:TRIM(?:_(?:HEAD|TAIL))?)|CLICK_ACTION_(?:NONE|TOUCH|SIT|BUY|PAY|OPEN(?:_MEDIA)?|PLAY|ZOOM)|TOUCH_INVALID_FACE|PROFILE_(?:NONE|SCRIPT_MEMORY)|RC_(?:DATA_FLAGS|DETECT_PHANTOM|GET_(?:LINK_NUM|NORMAL|ROOT_KEY)|MAX_HITS|REJECT_(?:TYPES|AGENTS|(?:NON)?PHYSICAL|LAND))|RCERR_(?:CAST_TIME_EXCEEDED|SIM_PERF_LOW|UNKNOWN)|ESTATE_ACCESS_(?:ALLOWED_(?:AGENT|GROUP)_(?:ADD|REMOVE)|BANNED_AGENT_(?:ADD|REMOVE))|DENSITY|FRICTION|RESTITUTION|GRAVITY_MULTIPLIER|KFM_(?:COMMAND|CMD_(?:PLAY|STOP|PAUSE|SET_MODE)|MODE|FORWARD|LOOP|PING_PONG|REVERSE|DATA|ROTATION|TRANSLATION)|ERR_(?:GENERIC|PARCEL_PERMISSIONS|MALFORMED_PARAMS|RUNTIME_PERMISSIONS|THROTTLED)|CHARACTER_(?:CMD_(?:(?:SMOOTH_)?STOP|JUMP)|DESIRED_(?:TURN_)?SPEED|RADIUS|STAY_WITHIN_PARCEL|LENGTH|ORIENTATION|ACCOUNT_FOR_SKIPPED_FRAMES|AVOIDANCE_MODE|TYPE(?:_(?:[A-D]|NONE))?|MAX_(?:DECEL|TURN_RADIUS|(?:ACCEL|SPEED)))|PURSUIT_(?:OFFSET|FUZZ_FACTOR|GOAL_TOLERANCE|INTERCEPT)|REQUIRE_LINE_OF_SIGHT|FORCE_DIRECT_PATH|VERTICAL|HORIZONTAL|AVOID_(?:CHARACTERS|DYNAMIC_OBSTACLES|NONE)|PU_(?:EVADE_(?:HIDDEN|SPOTTED)|FAILURE_(?:DYNAMIC_PATHFINDING_DISABLED|INVALID_(?:GOAL|START)|NO_(?:NAVMESH|VALID_DESTINATION)|OTHER|TARGET_GONE|(?:PARCEL_)?UNREACHABLE)|(?:GOAL|SLOWDOWN_DISTANCE)_REACHED)|TRAVERSAL_TYPE(?:_(?:FAST|NONE|SLOW))?|CONTENT_TYPE_(?:ATOM|FORM|HTML|JSON|LLSD|RSS|TEXT|XHTML|XML)|GCNP_(?:RADIUS|STATIC)|(?:PATROL|WANDER)_PAUSE_AT_WAYPOINTS|OPT_(?:AVATAR|CHARACTER|EXCLUSION_VOLUME|LEGACY_LINKSET|MATERIAL_VOLUME|OTHER|STATIC_OBSTACLE|WALKABLE)|SIM_STAT_PCT_CHARS_STEPPED)\b' - lsl_constants_integer_boolean = r'\b(?:FALSE|TRUE)\b' - lsl_constants_rotation = r'\b(?:ZERO_ROTATION)\b' - lsl_constants_string = r'\b(?:EOF|JSON_(?:ARRAY|DELETE|FALSE|INVALID|NULL|NUMBER|OBJECT|STRING|TRUE)|NULL_KEY|TEXTURE_(?:BLANK|DEFAULT|MEDIA|PLYWOOD|TRANSPARENT)|URL_REQUEST_(?:GRANTED|DENIED))\b' - lsl_constants_vector = r'\b(?:TOUCH_INVALID_(?:TEXCOORD|VECTOR)|ZERO_VECTOR)\b' - lsl_invalid_broken = r'\b(?:LAND_(?:LARGE|MEDIUM|SMALL)_BRUSH)\b' - lsl_invalid_deprecated = r'\b(?:ATTACH_[LR]PEC|DATA_RATING|OBJECT_ATTACHMENT_(?:GEOMETRY_BYTES|SURFACE_AREA)|PRIM_(?:CAST_SHADOWS|MATERIAL_LIGHT|TYPE_LEGACY)|PSYS_SRC_(?:INNER|OUTER)ANGLE|VEHICLE_FLAG_NO_FLY_UP|ll(?:Cloud|Make(?:Explosion|Fountain|Smoke|Fire)|RemoteDataSetRegion|Sound(?:Preload)?|XorBase64Strings(?:Correct)?))\b' - lsl_invalid_illegal = 
r'\b(?:event)\b' - lsl_invalid_unimplemented = r'\b(?:CHARACTER_(?:MAX_ANGULAR_(?:ACCEL|SPEED)|TURN_SPEED_MULTIPLIER)|PERMISSION_(?:CHANGE_(?:JOINTS|PERMISSIONS)|RELEASE_OWNERSHIP|REMAP_CONTROLS)|PRIM_PHYSICS_MATERIAL|PSYS_SRC_OBJ_REL_MASK|ll(?:CollisionSprite|(?:Stop)?PointAt|(?:(?:Refresh|Set)Prim)URL|(?:Take|Release)Camera|RemoteLoadScript))\b' - lsl_reserved_godmode = r'\b(?:ll(?:GodLikeRezObject|Set(?:Inventory|Object)PermMask))\b' - lsl_reserved_log = r'\b(?:print)\b' - lsl_operators = r'\+\+|\-\-|<<|>>|&&?|\|\|?|\^|~|[!%<>=*+\-/]=?' - - tokens = { - 'root': - [ - (r'//.*?\n', Comment.Single), - (r'/\*', Comment.Multiline, 'comment'), - (r'"', String.Double, 'string'), - (lsl_keywords, Keyword), - (lsl_types, Keyword.Type), - (lsl_states, Name.Class), - (lsl_events, Name.Builtin), - (lsl_functions_builtin, Name.Function), - (lsl_constants_float, Keyword.Constant), - (lsl_constants_integer, Keyword.Constant), - (lsl_constants_integer_boolean, Keyword.Constant), - (lsl_constants_rotation, Keyword.Constant), - (lsl_constants_string, Keyword.Constant), - (lsl_constants_vector, Keyword.Constant), - (lsl_invalid_broken, Error), - (lsl_invalid_deprecated, Error), - (lsl_invalid_illegal, Error), - (lsl_invalid_unimplemented, Error), - (lsl_reserved_godmode, Keyword.Reserved), - (lsl_reserved_log, Keyword.Reserved), - (r'\b([a-zA-Z_]\w*)\b', Name.Variable), - (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d*', Number.Float), - (r'(\d+\.\d*|\.\d+)', Number.Float), - (r'0[xX][0-9a-fA-F]+', Number.Hex), - (r'\d+', Number.Integer), - (lsl_operators, Operator), - (r':=?', Error), - (r'[,;{}()\[\]]', Punctuation), - (r'\n+', Whitespace), - (r'\s+', Whitespace) - ], - 'comment': - [ - (r'[^*/]+', Comment.Multiline), - (r'/\*', Comment.Multiline, '#push'), - (r'\*/', Comment.Multiline, '#pop'), - (r'[*/]', Comment.Multiline) - ], - 'string': - [ - (r'\\([nt"\\])', String.Escape), - (r'"', String.Double, '#pop'), - (r'\\.', Error), - (r'[^"\\]+', String.Double), - ] - } - - -class AppleScriptLexer(RegexLexer): - """ - For `AppleScript source code - `_, - including `AppleScript Studio - `_. - Contributed by Andreas Amann . - - .. 
versionadded:: 1.0 - """ - - name = 'AppleScript' - aliases = ['applescript'] - filenames = ['*.applescript'] - - flags = re.MULTILINE | re.DOTALL - - Identifiers = r'[a-zA-Z]\w*' - - # XXX: use words() for all of these - Literals = ('AppleScript', 'current application', 'false', 'linefeed', - 'missing value', 'pi', 'quote', 'result', 'return', 'space', - 'tab', 'text item delimiters', 'true', 'version') - Classes = ('alias ', 'application ', 'boolean ', 'class ', 'constant ', - 'date ', 'file ', 'integer ', 'list ', 'number ', 'POSIX file ', - 'real ', 'record ', 'reference ', 'RGB color ', 'script ', - 'text ', 'unit types', '(?:Unicode )?text', 'string') - BuiltIn = ('attachment', 'attribute run', 'character', 'day', 'month', - 'paragraph', 'word', 'year') - HandlerParams = ('about', 'above', 'against', 'apart from', 'around', - 'aside from', 'at', 'below', 'beneath', 'beside', - 'between', 'for', 'given', 'instead of', 'on', 'onto', - 'out of', 'over', 'since') - Commands = ('ASCII (character|number)', 'activate', 'beep', 'choose URL', - 'choose application', 'choose color', 'choose file( name)?', - 'choose folder', 'choose from list', - 'choose remote application', 'clipboard info', - 'close( access)?', 'copy', 'count', 'current date', 'delay', - 'delete', 'display (alert|dialog)', 'do shell script', - 'duplicate', 'exists', 'get eof', 'get volume settings', - 'info for', 'launch', 'list (disks|folder)', 'load script', - 'log', 'make', 'mount volume', 'new', 'offset', - 'open( (for access|location))?', 'path to', 'print', 'quit', - 'random number', 'read', 'round', 'run( script)?', - 'say', 'scripting components', - 'set (eof|the clipboard to|volume)', 'store script', - 'summarize', 'system attribute', 'system info', - 'the clipboard', 'time to GMT', 'write', 'quoted form') - References = ('(in )?back of', '(in )?front of', '[0-9]+(st|nd|rd|th)', - 'first', 'second', 'third', 'fourth', 'fifth', 'sixth', - 'seventh', 'eighth', 'ninth', 'tenth', 'after', 'back', - 'before', 'behind', 'every', 'front', 'index', 'last', - 'middle', 'some', 'that', 'through', 'thru', 'where', 'whose') - Operators = ("and", "or", "is equal", "equals", "(is )?equal to", "is not", - "isn't", "isn't equal( to)?", "is not equal( to)?", - "doesn't equal", "does not equal", "(is )?greater than", - "comes after", "is not less than or equal( to)?", - "isn't less than or equal( to)?", "(is )?less than", - "comes before", "is not greater than or equal( to)?", - "isn't greater than or equal( to)?", - "(is )?greater than or equal( to)?", "is not less than", - "isn't less than", "does not come before", - "doesn't come before", "(is )?less than or equal( to)?", - "is not greater than", "isn't greater than", - "does not come after", "doesn't come after", "starts? with", - "begins? with", "ends? 
with", "contains?", "does not contain", - "doesn't contain", "is in", "is contained by", "is not in", - "is not contained by", "isn't contained by", "div", "mod", - "not", "(a )?(ref( to)?|reference to)", "is", "does") - Control = ('considering', 'else', 'error', 'exit', 'from', 'if', - 'ignoring', 'in', 'repeat', 'tell', 'then', 'times', 'to', - 'try', 'until', 'using terms from', 'while', 'whith', - 'with timeout( of)?', 'with transaction', 'by', 'continue', - 'end', 'its?', 'me', 'my', 'return', 'of', 'as') - Declarations = ('global', 'local', 'prop(erty)?', 'set', 'get') - Reserved = ('but', 'put', 'returning', 'the') - StudioClasses = ('action cell', 'alert reply', 'application', 'box', - 'browser( cell)?', 'bundle', 'button( cell)?', 'cell', - 'clip view', 'color well', 'color-panel', - 'combo box( item)?', 'control', - 'data( (cell|column|item|row|source))?', 'default entry', - 'dialog reply', 'document', 'drag info', 'drawer', - 'event', 'font(-panel)?', 'formatter', - 'image( (cell|view))?', 'matrix', 'menu( item)?', 'item', - 'movie( view)?', 'open-panel', 'outline view', 'panel', - 'pasteboard', 'plugin', 'popup button', - 'progress indicator', 'responder', 'save-panel', - 'scroll view', 'secure text field( cell)?', 'slider', - 'sound', 'split view', 'stepper', 'tab view( item)?', - 'table( (column|header cell|header view|view))', - 'text( (field( cell)?|view))?', 'toolbar( item)?', - 'user-defaults', 'view', 'window') - StudioEvents = ('accept outline drop', 'accept table drop', 'action', - 'activated', 'alert ended', 'awake from nib', 'became key', - 'became main', 'begin editing', 'bounds changed', - 'cell value', 'cell value changed', 'change cell value', - 'change item value', 'changed', 'child of item', - 'choose menu item', 'clicked', 'clicked toolbar item', - 'closed', 'column clicked', 'column moved', - 'column resized', 'conclude drop', 'data representation', - 'deminiaturized', 'dialog ended', 'document nib name', - 'double clicked', 'drag( (entered|exited|updated))?', - 'drop', 'end editing', 'exposed', 'idle', 'item expandable', - 'item value', 'item value changed', 'items changed', - 'keyboard down', 'keyboard up', 'launched', - 'load data representation', 'miniaturized', 'mouse down', - 'mouse dragged', 'mouse entered', 'mouse exited', - 'mouse moved', 'mouse up', 'moved', - 'number of browser rows', 'number of items', - 'number of rows', 'open untitled', 'opened', 'panel ended', - 'parameters updated', 'plugin loaded', 'prepare drop', - 'prepare outline drag', 'prepare outline drop', - 'prepare table drag', 'prepare table drop', - 'read from file', 'resigned active', 'resigned key', - 'resigned main', 'resized( sub views)?', - 'right mouse down', 'right mouse dragged', - 'right mouse up', 'rows changed', 'scroll wheel', - 'selected tab view item', 'selection changed', - 'selection changing', 'should begin editing', - 'should close', 'should collapse item', - 'should end editing', 'should expand item', - 'should open( untitled)?', - 'should quit( after last window closed)?', - 'should select column', 'should select item', - 'should select row', 'should select tab view item', - 'should selection change', 'should zoom', 'shown', - 'update menu item', 'update parameters', - 'update toolbar item', 'was hidden', 'was miniaturized', - 'will become active', 'will close', 'will dismiss', - 'will display browser cell', 'will display cell', - 'will display item cell', 'will display outline cell', - 'will finish launching', 'will hide', 'will miniaturize', - 'will move', 
'will open', 'will pop up', 'will quit', - 'will resign active', 'will resize( sub views)?', - 'will select tab view item', 'will show', 'will zoom', - 'write to file', 'zoomed') - StudioCommands = ('animate', 'append', 'call method', 'center', - 'close drawer', 'close panel', 'display', - 'display alert', 'display dialog', 'display panel', 'go', - 'hide', 'highlight', 'increment', 'item for', - 'load image', 'load movie', 'load nib', 'load panel', - 'load sound', 'localized string', 'lock focus', 'log', - 'open drawer', 'path for', 'pause', 'perform action', - 'play', 'register', 'resume', 'scroll', 'select( all)?', - 'show', 'size to fit', 'start', 'step back', - 'step forward', 'stop', 'synchronize', 'unlock focus', - 'update') - StudioProperties = ('accepts arrow key', 'action method', 'active', - 'alignment', 'allowed identifiers', - 'allows branch selection', 'allows column reordering', - 'allows column resizing', 'allows column selection', - 'allows customization', - 'allows editing text attributes', - 'allows empty selection', 'allows mixed state', - 'allows multiple selection', 'allows reordering', - 'allows undo', 'alpha( value)?', 'alternate image', - 'alternate increment value', 'alternate title', - 'animation delay', 'associated file name', - 'associated object', 'auto completes', 'auto display', - 'auto enables items', 'auto repeat', - 'auto resizes( outline column)?', - 'auto save expanded items', 'auto save name', - 'auto save table columns', 'auto saves configuration', - 'auto scroll', 'auto sizes all columns to fit', - 'auto sizes cells', 'background color', 'bezel state', - 'bezel style', 'bezeled', 'border rect', 'border type', - 'bordered', 'bounds( rotation)?', 'box type', - 'button returned', 'button type', - 'can choose directories', 'can choose files', - 'can draw', 'can hide', - 'cell( (background color|size|type))?', 'characters', - 'class', 'click count', 'clicked( data)? column', - 'clicked data item', 'clicked( data)? row', - 'closeable', 'collating', 'color( (mode|panel))', - 'command key down', 'configuration', - 'content(s| (size|view( margins)?))?', 'context', - 'continuous', 'control key down', 'control size', - 'control tint', 'control view', - 'controller visible', 'coordinate system', - 'copies( on scroll)?', 'corner view', 'current cell', - 'current column', 'current( field)? editor', - 'current( menu)? item', 'current row', - 'current tab view item', 'data source', - 'default identifiers', 'delta (x|y|z)', - 'destination window', 'directory', 'display mode', - 'displayed cell', 'document( (edited|rect|view))?', - 'double value', 'dragged column', 'dragged distance', - 'dragged items', 'draws( cell)? background', - 'draws grid', 'dynamically scrolls', 'echos bullets', - 'edge', 'editable', 'edited( data)? column', - 'edited data item', 'edited( data)? 
row', 'enabled', - 'enclosing scroll view', 'ending page', - 'error handling', 'event number', 'event type', - 'excluded from windows menu', 'executable path', - 'expanded', 'fax number', 'field editor', 'file kind', - 'file name', 'file type', 'first responder', - 'first visible column', 'flipped', 'floating', - 'font( panel)?', 'formatter', 'frameworks path', - 'frontmost', 'gave up', 'grid color', 'has data items', - 'has horizontal ruler', 'has horizontal scroller', - 'has parent data item', 'has resize indicator', - 'has shadow', 'has sub menu', 'has vertical ruler', - 'has vertical scroller', 'header cell', 'header view', - 'hidden', 'hides when deactivated', 'highlights by', - 'horizontal line scroll', 'horizontal page scroll', - 'horizontal ruler view', 'horizontally resizable', - 'icon image', 'id', 'identifier', - 'ignores multiple clicks', - 'image( (alignment|dims when disabled|frame style|scaling))?', - 'imports graphics', 'increment value', - 'indentation per level', 'indeterminate', 'index', - 'integer value', 'intercell spacing', 'item height', - 'key( (code|equivalent( modifier)?|window))?', - 'knob thickness', 'label', 'last( visible)? column', - 'leading offset', 'leaf', 'level', 'line scroll', - 'loaded', 'localized sort', 'location', 'loop mode', - 'main( (bunde|menu|window))?', 'marker follows cell', - 'matrix mode', 'maximum( content)? size', - 'maximum visible columns', - 'menu( form representation)?', 'miniaturizable', - 'miniaturized', 'minimized image', 'minimized title', - 'minimum column width', 'minimum( content)? size', - 'modal', 'modified', 'mouse down state', - 'movie( (controller|file|rect))?', 'muted', 'name', - 'needs display', 'next state', 'next text', - 'number of tick marks', 'only tick mark values', - 'opaque', 'open panel', 'option key down', - 'outline table column', 'page scroll', 'pages across', - 'pages down', 'palette label', 'pane splitter', - 'parent data item', 'parent window', 'pasteboard', - 'path( (names|separator))?', 'playing', - 'plays every frame', 'plays selection only', 'position', - 'preferred edge', 'preferred type', 'pressure', - 'previous text', 'prompt', 'properties', - 'prototype cell', 'pulls down', 'rate', - 'released when closed', 'repeated', - 'requested print time', 'required file type', - 'resizable', 'resized column', 'resource path', - 'returns records', 'reuses columns', 'rich text', - 'roll over', 'row height', 'rulers visible', - 'save panel', 'scripts path', 'scrollable', - 'selectable( identifiers)?', 'selected cell', - 'selected( data)? columns?', 'selected data items?', - 'selected( data)? 
rows?', 'selected item identifier', - 'selection by rect', 'send action on arrow key', - 'sends action when done editing', 'separates columns', - 'separator item', 'sequence number', 'services menu', - 'shared frameworks path', 'shared support path', - 'sheet', 'shift key down', 'shows alpha', - 'shows state by', 'size( mode)?', - 'smart insert delete enabled', 'sort case sensitivity', - 'sort column', 'sort order', 'sort type', - 'sorted( data rows)?', 'sound', 'source( mask)?', - 'spell checking enabled', 'starting page', 'state', - 'string value', 'sub menu', 'super menu', 'super view', - 'tab key traverses cells', 'tab state', 'tab type', - 'tab view', 'table view', 'tag', 'target( printer)?', - 'text color', 'text container insert', - 'text container origin', 'text returned', - 'tick mark position', 'time stamp', - 'title(d| (cell|font|height|position|rect))?', - 'tool tip', 'toolbar', 'trailing offset', 'transparent', - 'treat packages as directories', 'truncated labels', - 'types', 'unmodified characters', 'update views', - 'use sort indicator', 'user defaults', - 'uses data source', 'uses ruler', - 'uses threaded animation', - 'uses title from previous column', 'value wraps', - 'version', - 'vertical( (line scroll|page scroll|ruler view))?', - 'vertically resizable', 'view', - 'visible( document rect)?', 'volume', 'width', 'window', - 'windows menu', 'wraps', 'zoomable', 'zoomed') - - tokens = { - 'root': [ - (r'\s+', Text), - (u'¬\\n', String.Escape), - (r"'s\s+", Text), # This is a possessive, consider moving - (r'(--|#).*?$', Comment), - (r'\(\*', Comment.Multiline, 'comment'), - (r'[(){}!,.:]', Punctuation), - (u'(«)([^»]+)(»)', - bygroups(Text, Name.Builtin, Text)), - (r'\b((?:considering|ignoring)\s*)' - r'(application responses|case|diacriticals|hyphens|' - r'numeric strings|punctuation|white space)', - bygroups(Keyword, Name.Builtin)), - (u'(-|\\*|\\+|&|≠|>=?|<=?|=|≥|≤|/|÷|\\^)', Operator), - (r"\b(%s)\b" % '|'.join(Operators), Operator.Word), - (r'^(\s*(?:on|end)\s+)' - r'(%s)' % '|'.join(StudioEvents[::-1]), - bygroups(Keyword, Name.Function)), - (r'^(\s*)(in|on|script|to)(\s+)', bygroups(Text, Keyword, Text)), - (r'\b(as )(%s)\b' % '|'.join(Classes), - bygroups(Keyword, Name.Class)), - (r'\b(%s)\b' % '|'.join(Literals), Name.Constant), - (r'\b(%s)\b' % '|'.join(Commands), Name.Builtin), - (r'\b(%s)\b' % '|'.join(Control), Keyword), - (r'\b(%s)\b' % '|'.join(Declarations), Keyword), - (r'\b(%s)\b' % '|'.join(Reserved), Name.Builtin), - (r'\b(%s)s?\b' % '|'.join(BuiltIn), Name.Builtin), - (r'\b(%s)\b' % '|'.join(HandlerParams), Name.Builtin), - (r'\b(%s)\b' % '|'.join(StudioProperties), Name.Attribute), - (r'\b(%s)s?\b' % '|'.join(StudioClasses), Name.Builtin), - (r'\b(%s)\b' % '|'.join(StudioCommands), Name.Builtin), - (r'\b(%s)\b' % '|'.join(References), Name.Builtin), - (r'"(\\\\|\\"|[^"])*"', String.Double), - (r'\b(%s)\b' % Identifiers, Name.Variable), - (r'[-+]?(\d+\.\d*|\d*\.\d+)(E[-+][0-9]+)?', Number.Float), - (r'[-+]?\d+', Number.Integer), - ], - 'comment': [ - (r'\(\*', Comment.Multiline, '#push'), - (r'\*\)', Comment.Multiline, '#pop'), - ('[^*(]+', Comment.Multiline), - ('[*(]', Comment.Multiline), - ], - } - - -class RexxLexer(RegexLexer): - """ - `Rexx `_ is a scripting language available for - a wide range of different platforms with its roots found on mainframe - systems. It is popular for I/O- and data based tasks and can act as glue - language to bind different applications together. - - .. 
versionadded:: 2.0 - """ - name = 'Rexx' - aliases = ['rexx', 'arexx'] - filenames = ['*.rexx', '*.rex', '*.rx', '*.arexx'] - mimetypes = ['text/x-rexx'] - flags = re.IGNORECASE - - tokens = { - 'root': [ - (r'\s', Whitespace), - (r'/\*', Comment.Multiline, 'comment'), - (r'"', String, 'string_double'), - (r"'", String, 'string_single'), - (r'[0-9]+(\.[0-9]+)?(e[+-]?[0-9])?', Number), - (r'([a-z_]\w*)(\s*)(:)(\s*)(procedure)\b', - bygroups(Name.Function, Whitespace, Operator, Whitespace, - Keyword.Declaration)), - (r'([a-z_]\w*)(\s*)(:)', - bygroups(Name.Label, Whitespace, Operator)), - include('function'), - include('keyword'), - include('operator'), - (r'[a-z_]\w*', Text), - ], - 'function': [ - (words(( - 'abbrev', 'abs', 'address', 'arg', 'b2x', 'bitand', 'bitor', 'bitxor', - 'c2d', 'c2x', 'center', 'charin', 'charout', 'chars', 'compare', - 'condition', 'copies', 'd2c', 'd2x', 'datatype', 'date', 'delstr', - 'delword', 'digits', 'errortext', 'form', 'format', 'fuzz', 'insert', - 'lastpos', 'left', 'length', 'linein', 'lineout', 'lines', 'max', - 'min', 'overlay', 'pos', 'queued', 'random', 'reverse', 'right', 'sign', - 'sourceline', 'space', 'stream', 'strip', 'substr', 'subword', 'symbol', - 'time', 'trace', 'translate', 'trunc', 'value', 'verify', 'word', - 'wordindex', 'wordlength', 'wordpos', 'words', 'x2b', 'x2c', 'x2d', - 'xrange'), suffix=r'(\s*)(\()'), - bygroups(Name.Builtin, Whitespace, Operator)), - ], - 'keyword': [ - (r'(address|arg|by|call|do|drop|else|end|exit|for|forever|if|' - r'interpret|iterate|leave|nop|numeric|off|on|options|parse|' - r'pull|push|queue|return|say|select|signal|to|then|trace|until|' - r'while)\b', Keyword.Reserved), - ], - 'operator': [ - (r'(-|//|/|\(|\)|\*\*|\*|\\<<|\\<|\\==|\\=|\\>>|\\>|\\|\|\||\||' - r'&&|&|%|\+|<<=|<<|<=|<>|<|==|=|><|>=|>>=|>>|>|¬<<|¬<|¬==|¬=|' - r'¬>>|¬>|¬|\.|,)', Operator), - ], - 'string_double': [ - (r'[^"\n]+', String), - (r'""', String), - (r'"', String, '#pop'), - (r'\n', Text, '#pop'), # Stray linefeed also terminates strings. - ], - 'string_single': [ - (r'[^\'\n]', String), - (r'\'\'', String), - (r'\'', String, '#pop'), - (r'\n', Text, '#pop'), # Stray linefeed also terminates strings. - ], - 'comment': [ - (r'[^*]+', Comment.Multiline), - (r'\*/', Comment.Multiline, '#pop'), - (r'\*', Comment.Multiline) - ] - } - - _c = lambda s: re.compile(s, re.MULTILINE) - _ADDRESS_COMMAND_PATTERN = _c(r'^\s*address\s+command\b') - _ADDRESS_PATTERN = _c(r'^\s*address\s+') - _DO_WHILE_PATTERN = _c(r'^\s*do\s+while\b') - _IF_THEN_DO_PATTERN = _c(r'^\s*if\b.+\bthen\s+do\s*$') - _PROCEDURE_PATTERN = _c(r'^\s*([a-z_]\w*)(\s*)(:)(\s*)(procedure)\b') - _ELSE_DO_PATTERN = _c(r'\belse\s+do\s*$') - _PARSE_ARG_PATTERN = _c(r'^\s*parse\s+(upper\s+)?(arg|value)\b') - PATTERNS_AND_WEIGHTS = ( - (_ADDRESS_COMMAND_PATTERN, 0.2), - (_ADDRESS_PATTERN, 0.05), - (_DO_WHILE_PATTERN, 0.1), - (_ELSE_DO_PATTERN, 0.1), - (_IF_THEN_DO_PATTERN, 0.1), - (_PROCEDURE_PATTERN, 0.5), - (_PARSE_ARG_PATTERN, 0.2), - ) - - def analyse_text(text): - """ - Check for an initial comment and patterns that distinguish Rexx from - other C-like languages. - """ - if re.search(r'/\*\**\s*rexx', text, re.IGNORECASE): - # Header matches MVS Rexx requirements, so this is certainly a - # Rexx script. - return 1.0 - elif text.startswith('/*'): - # Header matches general Rexx requirements; the source code might - # still be any language using C comments such as C++, C# or Java.
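- # For illustration, using the weights above: a hypothetical script
- # that starts with '/*' and contains only 'parse arg' (0.2) and a
- # 'label: procedure' line (0.5) scores min(0.2 + 0.5 + 0.01, 1.0) == 0.71.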
- lowerText = text.lower() - result = sum(weight - for (pattern, weight) in RexxLexer.PATTERNS_AND_WEIGHTS - if pattern.search(lowerText)) + 0.01 - return min(result, 1.0) - - -class MOOCodeLexer(RegexLexer): - """ - For `MOOCode `_ (the MOO scripting - language). - - .. versionadded:: 0.9 - """ - name = 'MOOCode' - filenames = ['*.moo'] - aliases = ['moocode', 'moo'] - mimetypes = ['text/x-moocode'] - - tokens = { - 'root': [ - # Numbers - (r'(0|[1-9][0-9_]*)', Number.Integer), - # Strings - (r'"(\\\\|\\"|[^"])*"', String), - # exceptions - (r'(E_PERM|E_DIV)', Name.Exception), - # db-refs - (r'((#[-0-9]+)|(\$\w+))', Name.Entity), - # Keywords - (r'\b(if|else|elseif|endif|for|endfor|fork|endfork|while' - r'|endwhile|break|continue|return|try' - r'|except|endtry|finally|in)\b', Keyword), - # builtins - (r'(random|length)', Name.Builtin), - # special variables - (r'(player|caller|this|args)', Name.Variable.Instance), - # skip whitespace - (r'\s+', Text), - (r'\n', Text), - # other operators - (r'([!;=,{}&|:.\[\]@()<>?]+)', Operator), - # function call - (r'(\w+)(\()', bygroups(Name.Function, Operator)), - # variables - (r'(\w+)', Text), - ] - } - - -class HybrisLexer(RegexLexer): - """ - For `Hybris `_ source code. - - .. versionadded:: 1.4 - """ - - name = 'Hybris' - aliases = ['hybris', 'hy'] - filenames = ['*.hy', '*.hyb'] - mimetypes = ['text/x-hybris', 'application/x-hybris'] - - flags = re.MULTILINE | re.DOTALL - - tokens = { - 'root': [ - # method names - (r'^(\s*(?:function|method|operator\s+)+?)' - r'([a-zA-Z_]\w*)' - r'(\s*)(\()', bygroups(Keyword, Name.Function, Text, Operator)), - (r'[^\S\n]+', Text), - (r'//.*?\n', Comment.Single), - (r'/\*.*?\*/', Comment.Multiline), - (r'@[a-zA-Z_][\w.]*', Name.Decorator), - (r'(break|case|catch|next|default|do|else|finally|for|foreach|of|' - r'unless|if|new|return|switch|me|throw|try|while)\b', Keyword), - (r'(extends|private|protected|public|static|throws|function|method|' - r'operator)\b', Keyword.Declaration), - (r'(true|false|null|__FILE__|__LINE__|__VERSION__|__LIB_PATH__|' - r'__INC_PATH__)\b', Keyword.Constant), - (r'(class|struct)(\s+)', - bygroups(Keyword.Declaration, Text), 'class'), - (r'(import|include)(\s+)', - bygroups(Keyword.Namespace, Text), 'import'), - (words(( - 'gc_collect', 'gc_mm_items', 'gc_mm_usage', 'gc_collect_threshold', - 'urlencode', 'urldecode', 'base64encode', 'base64decode', 'sha1', 'crc32', - 'sha2', 'md5', 'md5_file', 'acos', 'asin', 'atan', 'atan2', 'ceil', 'cos', - 'cosh', 'exp', 'fabs', 'floor', 'fmod', 'log', 'log10', 'pow', 'sin', - 'sinh', 'sqrt', 'tan', 'tanh', 'isint', 'isfloat', 'ischar', 'isstring', - 'isarray', 'ismap', 'isalias', 'typeof', 'sizeof', 'toint', 'tostring', - 'fromxml', 'toxml', 'binary', 'pack', 'load', 'eval', 'var_names', - 'var_values', 'user_functions', 'dyn_functions', 'methods', 'call', - 'call_method', 'mknod', 'mkfifo', 'mount', 'umount2', 'umount', 'ticks', - 'usleep', 'sleep', 'time', 'strtime', 'strdate', 'dllopen', 'dlllink', - 'dllcall', 'dllcall_argv', 'dllclose', 'env', 'exec', 'fork', 'getpid', - 'wait', 'popen', 'pclose', 'exit', 'kill', 'pthread_create', - 'pthread_create_argv', 'pthread_exit', 'pthread_join', 'pthread_kill', - 'smtp_send', 'http_get', 'http_post', 'http_download', 'socket', 'bind', - 'listen', 'accept', 'getsockname', 'getpeername', 'settimeout', 'connect', - 'server', 'recv', 'send', 'close', 'print', 'println', 'printf', 'input', - 'readline', 'serial_open', 'serial_fcntl', 'serial_get_attr', - 'serial_get_ispeed', 'serial_get_ospeed', 
'serial_set_attr', - 'serial_set_ispeed', 'serial_set_ospeed', 'serial_write', 'serial_read', - 'serial_close', 'xml_load', 'xml_parse', 'fopen', 'fseek', 'ftell', - 'fsize', 'fread', 'fwrite', 'fgets', 'fclose', 'file', 'readdir', - 'pcre_replace', 'size', 'pop', 'unmap', 'has', 'keys', 'values', - 'length', 'find', 'substr', 'replace', 'split', 'trim', 'remove', - 'contains', 'join'), suffix=r'\b'), - Name.Builtin), - (words(( - 'MethodReference', 'Runner', 'Dll', 'Thread', 'Pipe', 'Process', - 'Runnable', 'CGI', 'ClientSocket', 'Socket', 'ServerSocket', - 'File', 'Console', 'Directory', 'Exception'), suffix=r'\b'), - Keyword.Type), - (r'"(\\\\|\\"|[^"])*"', String), - (r"'\\.'|'[^\\]'|'\\u[0-9a-f]{4}'", String.Char), - (r'(\.)([a-zA-Z_]\w*)', - bygroups(Operator, Name.Attribute)), - (r'[a-zA-Z_]\w*:', Name.Label), - (r'[a-zA-Z_$]\w*', Name), - (r'[~^*!%&\[\](){}<>|+=:;,./?\-@]+', Operator), - (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), - (r'0x[0-9a-f]+', Number.Hex), - (r'[0-9]+L?', Number.Integer), - (r'\n', Text), - ], - 'class': [ - (r'[a-zA-Z_]\w*', Name.Class, '#pop') - ], - 'import': [ - (r'[\w.]+\*?', Name.Namespace, '#pop') - ], - } - - -class EasytrieveLexer(RegexLexer): - """ - Easytrieve Plus is a programming language for extracting, filtering and - converting sequential data. Furthermore, it can lay out data for reports. - It is mainly used on mainframe platforms and can access several of the - mainframe's native file formats. It is somewhat comparable to awk. - - .. versionadded:: 2.1 - """ - name = 'Easytrieve' - aliases = ['easytrieve'] - filenames = ['*.ezt', '*.mac'] - mimetypes = ['text/x-easytrieve'] - flags = 0 - - # Note: We cannot use r'\b' at the start and end of keywords because - # Easytrieve Plus delimiter characters are: - # - # * space ( ) - # * apostrophe (') - # * period (.) - # * comma (,) - # * parentheses ( and ) - # * colon (:) - # - # Additionally, words end once a '*' appears, indicating a comment.
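- # For illustration: _DELIMITER_PATTERN below matches a single space,
- # apostrophe, period, comma, parenthesis, colon or newline, while
- # _NON_DELIMITER_OR_COMMENT_PATTERN matches any character that is
- # neither a delimiter nor the '*' that starts a comment, so a
- # hypothetical word like KEY-PRESSED is consumed as one token.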
- _DELIMITERS = r' \'.,():\n' - _DELIMITERS_OR_COMENT = _DELIMITERS + '*' - _DELIMITER_PATTERN = '[' + _DELIMITERS + ']' - _DELIMITER_PATTERN_CAPTURE = '(' + _DELIMITER_PATTERN + ')' - _NON_DELIMITER_OR_COMMENT_PATTERN = '[^' + _DELIMITERS_OR_COMENT + ']' - _OPERATORS_PATTERN = u'[.+\\-/=\\[\\](){}<>;,&%¬]' - _KEYWORDS = [ - 'AFTER-BREAK', 'AFTER-LINE', 'AFTER-SCREEN', 'AIM', 'AND', 'ATTR', - 'BEFORE', 'BEFORE-BREAK', 'BEFORE-LINE', 'BEFORE-SCREEN', 'BUSHU', - 'BY', 'CALL', 'CASE', 'CHECKPOINT', 'CHKP', 'CHKP-STATUS', 'CLEAR', - 'CLOSE', 'COL', 'COLOR', 'COMMIT', 'CONTROL', 'COPY', 'CURSOR', 'D', - 'DECLARE', 'DEFAULT', 'DEFINE', 'DELETE', 'DENWA', 'DISPLAY', 'DLI', - 'DO', 'DUPLICATE', 'E', 'ELSE', 'ELSE-IF', 'END', 'END-CASE', - 'END-DO', 'END-IF', 'END-PROC', 'ENDPAGE', 'ENDTABLE', 'ENTER', 'EOF', - 'EQ', 'ERROR', 'EXIT', 'EXTERNAL', 'EZLIB', 'F1', 'F10', 'F11', 'F12', - 'F13', 'F14', 'F15', 'F16', 'F17', 'F18', 'F19', 'F2', 'F20', 'F21', - 'F22', 'F23', 'F24', 'F25', 'F26', 'F27', 'F28', 'F29', 'F3', 'F30', - 'F31', 'F32', 'F33', 'F34', 'F35', 'F36', 'F4', 'F5', 'F6', 'F7', - 'F8', 'F9', 'FETCH', 'FILE-STATUS', 'FILL', 'FINAL', 'FIRST', - 'FIRST-DUP', 'FOR', 'GE', 'GET', 'GO', 'GOTO', 'GQ', 'GR', 'GT', - 'HEADING', 'HEX', 'HIGH-VALUES', 'IDD', 'IDMS', 'IF', 'IN', 'INSERT', - 'JUSTIFY', 'KANJI-DATE', 'KANJI-DATE-LONG', 'KANJI-TIME', 'KEY', - 'KEY-PRESSED', 'KOKUGO', 'KUN', 'LAST-DUP', 'LE', 'LEVEL', 'LIKE', - 'LINE', 'LINE-COUNT', 'LINE-NUMBER', 'LINK', 'LIST', 'LOW-VALUES', - 'LQ', 'LS', 'LT', 'MACRO', 'MASK', 'MATCHED', 'MEND', 'MESSAGE', - 'MOVE', 'MSTART', 'NE', 'NEWPAGE', 'NOMASK', 'NOPRINT', 'NOT', - 'NOTE', 'NOVERIFY', 'NQ', 'NULL', 'OF', 'OR', 'OTHERWISE', 'PA1', - 'PA2', 'PA3', 'PAGE-COUNT', 'PAGE-NUMBER', 'PARM-REGISTER', - 'PATH-ID', 'PATTERN', 'PERFORM', 'POINT', 'POS', 'PRIMARY', 'PRINT', - 'PROCEDURE', 'PROGRAM', 'PUT', 'READ', 'RECORD', 'RECORD-COUNT', - 'RECORD-LENGTH', 'REFRESH', 'RELEASE', 'RENUM', 'REPEAT', 'REPORT', - 'REPORT-INPUT', 'RESHOW', 'RESTART', 'RETRIEVE', 'RETURN-CODE', - 'ROLLBACK', 'ROW', 'S', 'SCREEN', 'SEARCH', 'SECONDARY', 'SELECT', - 'SEQUENCE', 'SIZE', 'SKIP', 'SOKAKU', 'SORT', 'SQL', 'STOP', 'SUM', - 'SYSDATE', 'SYSDATE-LONG', 'SYSIN', 'SYSIPT', 'SYSLST', 'SYSPRINT', - 'SYSSNAP', 'SYSTIME', 'TALLY', 'TERM-COLUMNS', 'TERM-NAME', - 'TERM-ROWS', 'TERMINATION', 'TITLE', 'TO', 'TRANSFER', 'TRC', - 'UNIQUE', 'UNTIL', 'UPDATE', 'UPPERCASE', 'USER', 'USERID', 'VALUE', - 'VERIFY', 'W', 'WHEN', 'WHILE', 'WORK', 'WRITE', 'X', 'XDM', 'XRST' - ] - - tokens = { - 'root': [ - (r'\*.*\n', Comment.Single), - (r'\n+', Whitespace), - # Macro argument - (r'&' + _NON_DELIMITER_OR_COMMENT_PATTERN + r'+\.', Name.Variable, - 'after_macro_argument'), - # Macro call - (r'%' + _NON_DELIMITER_OR_COMMENT_PATTERN + r'+', Name.Variable), - (r'(FILE|MACRO|REPORT)(\s+)', - bygroups(Keyword.Declaration, Whitespace), 'after_declaration'), - (r'(JOB|PARM)' + r'(' + _DELIMITER_PATTERN + r')', - bygroups(Keyword.Declaration, Operator)), - (words(_KEYWORDS, suffix=_DELIMITER_PATTERN_CAPTURE), - bygroups(Keyword.Reserved, Operator)), - (_OPERATORS_PATTERN, Operator), - # Procedure declaration - (r'(' + _NON_DELIMITER_OR_COMMENT_PATTERN + r'+)(\s*)(\.?)(\s*)(PROC)(\s*\n)', - bygroups(Name.Function, Whitespace, Operator, Whitespace, - Keyword.Declaration, Whitespace)), - (r'[0-9]+\.[0-9]*', Number.Float), - (r'[0-9]+', Number.Integer), - (r"'(''|[^'])*'", String), - (r'\s+', Whitespace), - # Everything else just belongs to a name - (_NON_DELIMITER_OR_COMMENT_PATTERN + r'+', Name), - ], - 
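- # For illustration, a hypothetical macro argument such as &NAME.
- # matches the '&...' rule in 'root' above as Name.Variable and pushes
- # 'after_macro_argument', which pops back to 'root' on the next
- # comment, run of whitespace, operator, or string.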
'after_declaration': [ - (_NON_DELIMITER_OR_COMMENT_PATTERN + r'+', Name.Function), - default('#pop'), - ], - 'after_macro_argument': [ - (r'\*.*\n', Comment.Single, '#pop'), - (r'\s+', Whitespace, '#pop'), - (_OPERATORS_PATTERN, Operator, '#pop'), - (r"'(''|[^'])*'", String, '#pop'), - # Everything else just belongs to a name - (_NON_DELIMITER_OR_COMMENT_PATTERN + r'+', Name), - ], - } - _COMMENT_LINE_REGEX = re.compile(r'^\s*\*') - _MACRO_HEADER_REGEX = re.compile(r'^\s*MACRO') - - def analyse_text(text): - """ - Perform a structural analysis for basic Easytrieve constructs. - """ - result = 0.0 - lines = text.split('\n') - hasEndProc = False - hasHeaderComment = False - hasFile = False - hasJob = False - hasProc = False - hasParm = False - hasReport = False - - def isCommentLine(line): - return EasytrieveLexer._COMMENT_LINE_REGEX.match(line) is not None - - def isEmptyLine(line): - return not bool(line.strip()) - - # Remove possible empty lines and header comments. - while lines and (isEmptyLine(lines[0]) or isCommentLine(lines[0])): - if not isEmptyLine(lines[0]): - hasHeaderComment = True - del lines[0] - - if lines and EasytrieveLexer._MACRO_HEADER_REGEX.match(lines[0]): - # Looks like an Easytrieve macro. - result = 0.4 - if hasHeaderComment: - result += 0.4 - else: - # Scan the source for lines starting with indicators. - for line in lines: - words = line.split() - if (len(words) >= 2): - firstWord = words[0] - if not hasReport: - if not hasJob: - if not hasFile: - if not hasParm: - if firstWord == 'PARM': - hasParm = True - if firstWord == 'FILE': - hasFile = True - if firstWord == 'JOB': - hasJob = True - elif firstWord == 'PROC': - hasProc = True - elif firstWord == 'END-PROC': - hasEndProc = True - elif firstWord == 'REPORT': - hasReport = True - - # Weight the findings. - if hasJob and (hasProc == hasEndProc): - if hasHeaderComment: - result += 0.1 - if hasParm: - if hasProc: - # Found PARM, JOB and PROC/END-PROC: - # pretty sure this is Easytrieve. - result += 0.8 - else: - # Found PARM and JOB: probably this is Easytrieve - result += 0.5 - else: - # Found JOB and possibly other keywords: might be Easytrieve - result += 0.11 - if hasParm: - # Note: PARM is not a proper English word, so this is - # regarded as a much better indicator for Easytrieve than - # the other words. - result += 0.2 - if hasFile: - result += 0.01 - if hasReport: - result += 0.01 - assert 0.0 <= result <= 1.0 - return result - - -class JclLexer(RegexLexer): - """ - `Job Control Language (JCL) - `_ - is a scripting language used on mainframe platforms to instruct the system - on how to run a batch job or start a subsystem. It is somewhat - comparable to MS DOS batch and Unix shell scripts. - - .. versionadded:: 2.1 - """ - name = 'JCL' - aliases = ['jcl'] - filenames = ['*.jcl'] - mimetypes = ['text/x-jcl'] - flags = re.IGNORECASE - - tokens = { - 'root': [ - (r'//\*.*\n', Comment.Single), - (r'//', Keyword.Pseudo, 'statement'), - (r'/\*', Keyword.Pseudo, 'jes2_statement'), - # TODO: JES3 statement - (r'.*\n', Other) # Input text or inline code in any language.
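- # For illustration, a hypothetical job card such as
- #   //MYJOB JOB (ACCT),'NAME'
- # enters 'statement' after '//', where MYJOB is tagged Name.Label and
- # JOB Keyword.Reserved before its parameters are lexed in 'option'.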
- ], - 'statement': [ - (r'\s*\n', Whitespace, '#pop'), - (r'([a-z]\w*)(\s+)(exec|job)(\s*)', - bygroups(Name.Label, Whitespace, Keyword.Reserved, Whitespace), - 'option'), - (r'[a-z]\w*', Name.Variable, 'statement_command'), - (r'\s+', Whitespace, 'statement_command'), - ], - 'statement_command': [ - (r'\s+(command|cntl|dd|endctl|endif|else|include|jcllib|' - r'output|pend|proc|set|then|xmit)\s+', Keyword.Reserved, 'option'), - include('option') - ], - 'jes2_statement': [ - (r'\s*\n', Whitespace, '#pop'), - (r'\$', Keyword, 'option'), - (r'\b(jobparam|message|netacct|notify|output|priority|route|' - r'setup|signoff|xeq|xmit)\b', Keyword, 'option'), - ], - 'option': [ - # (r'\n', Text, 'root'), - (r'\*', Name.Builtin), - (r'[\[\](){}<>;,]', Punctuation), - (r'[-+*/=&%]', Operator), - (r'[a-z_]\w*', Name), - (r'\d+\.\d*', Number.Float), - (r'\.\d+', Number.Float), - (r'\d+', Number.Integer), - (r"'", String, 'option_string'), - (r'[ \t]+', Whitespace, 'option_comment'), - (r'\.', Punctuation), - ], - 'option_string': [ - (r"(\n)(//)", bygroups(Text, Keyword.Pseudo)), - (r"''", String), - (r"[^']", String), - (r"'", String, '#pop'), - ], - 'option_comment': [ - # (r'\n', Text, 'root'), - (r'.+', Comment.Single), - ] - } - - _JOB_HEADER_PATTERN = re.compile(r'^//[a-z#$@][a-z0-9#$@]{0,7}\s+job(\s+.*)?$', - re.IGNORECASE) - - def analyse_text(text): - """ - Recognize JCL job by header. - """ - result = 0.0 - lines = text.split('\n') - if len(lines) > 0: - if JclLexer._JOB_HEADER_PATTERN.match(lines[0]): - result = 1.0 - assert 0.0 <= result <= 1.0 - return result - - -class MiniScriptLexer(RegexLexer): - """ - For `MiniScript `_ source code. - - .. versionadded:: 2.6 - """ - - name = "MiniScript" - aliases = ["ms", "miniscript"] - filenames = ["*.ms"] - mimetypes = ['text/x-minicript', 'application/x-miniscript'] - - tokens = { - 'root': [ - (r'#!(.*?)$', Comment.Preproc), - default('base'), - ], - 'base': [ - ('//.*$', Comment.Single), - (r'(?i)(\d*\.\d+|\d+\.\d*)(e[+-]?\d+)?', Number), - (r'(?i)\d+e[+-]?\d+', Number), - (r'\d+', Number), - (r'\n', Text), - (r'[^\S\n]+', Text), - (r'"', String, 'string_double'), - (r'(==|!=|<=|>=|[=+\-*/%^<>.:])', Operator), - (r'[;,\[\]{}()]', Punctuation), - (words(( - 'break', 'continue', 'else', 'end', 'for', 'function', 'if', - 'in', 'isa', 'then', 'repeat', 'return', 'while'), suffix=r'\b'), - Keyword), - (words(( - 'abs', 'acos', 'asin', 'atan', 'ceil', 'char', 'cos', 'floor', - 'log', 'round', 'rnd', 'pi', 'sign', 'sin', 'sqrt', 'str', 'tan', - 'hasIndex', 'indexOf', 'len', 'val', 'code', 'remove', 'lower', - 'upper', 'replace', 'split', 'indexes', 'values', 'join', 'sum', - 'sort', 'shuffle', 'push', 'pop', 'pull', 'range', - 'print', 'input', 'time', 'wait', 'locals', 'globals', 'outer', - 'yield'), suffix=r'\b'), - Name.Builtin), - (r'(true|false|null)\b', Keyword.Constant), - (r'(and|or|not|new)\b', Operator.Word), - (r'(self|super|__isa)\b', Name.Builtin.Pseudo), - (r'[a-zA-Z_]\w*', Name.Variable) - ], - 'string_double': [ - (r'[^"\n]+', String), - (r'""', String), - (r'"', String, '#pop'), - (r'\n', Text, '#pop'), # Stray linefeed also terminates strings. - ] - } +# -*- coding: utf-8 -*- +""" + pygments.lexers.scripting + ~~~~~~~~~~~~~~~~~~~~~~~~~ + + Lexer for scripting and embedded languages. + + :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +import re + +from pygments.lexer import RegexLexer, include, bygroups, default, combined, \ + words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Error, Whitespace, Other +from pygments.util import get_bool_opt, get_list_opt + +__all__ = ['LuaLexer', 'MoonScriptLexer', 'ChaiscriptLexer', 'LSLLexer', + 'AppleScriptLexer', 'RexxLexer', 'MOOCodeLexer', 'HybrisLexer', + 'EasytrieveLexer', 'JclLexer', 'MiniScriptLexer'] + + +class LuaLexer(RegexLexer): + """ + For `Lua `_ source code. + + Additional options accepted: + + `func_name_highlighting` + If given and ``True``, highlight builtin function names + (default: ``True``). + `disabled_modules` + If given, must be a list of module names whose function names + should not be highlighted. By default all modules are highlighted. + + To get a list of allowed modules have a look into the + `_lua_builtins` module: + + .. sourcecode:: pycon + + >>> from pygments.lexers._lua_builtins import MODULES + >>> MODULES.keys() + ['string', 'coroutine', 'modules', 'io', 'basic', ...] + """ + + name = 'Lua' + aliases = ['lua'] + filenames = ['*.lua', '*.wlua'] + mimetypes = ['text/x-lua', 'application/x-lua'] + + _comment_multiline = r'(?:--\[(?P=*)\[[\w\W]*?\](?P=level)\])' + _comment_single = r'(?:--.*$)' + _space = r'(?:\s+)' + _s = r'(?:%s|%s|%s)' % (_comment_multiline, _comment_single, _space) + _name = r'(?:[^\W\d]\w*)' + + tokens = { + 'root': [ + # Lua allows a file to start with a shebang. + (r'#!.*', Comment.Preproc), + default('base'), + ], + 'ws': [ + (_comment_multiline, Comment.Multiline), + (_comment_single, Comment.Single), + (_space, Text), + ], + 'base': [ + include('ws'), + + (r'(?i)0x[\da-f]*(\.[\da-f]*)?(p[+-]?\d+)?', Number.Hex), + (r'(?i)(\d*\.\d+|\d+\.\d*)(e[+-]?\d+)?', Number.Float), + (r'(?i)\d+e[+-]?\d+', Number.Float), + (r'\d+', Number.Integer), + + # multiline strings + (r'(?s)\[(=*)\[.*?\]\1\]', String), + + (r'::', Punctuation, 'label'), + (r'\.{3}', Punctuation), + (r'[=<>|~&+\-*/%#^]+|\.\.', Operator), + (r'[\[\]{}().,:;]', Punctuation), + (r'(and|or|not)\b', Operator.Word), + + ('(break|do|else|elseif|end|for|if|in|repeat|return|then|until|' + r'while)\b', Keyword.Reserved), + (r'goto\b', Keyword.Reserved, 'goto'), + (r'(local)\b', Keyword.Declaration), + (r'(true|false|nil)\b', Keyword.Constant), + + (r'(function)\b', Keyword.Reserved, 'funcname'), + + (r'[A-Za-z_]\w*(\.[A-Za-z_]\w*)?', Name), + + ("'", String.Single, combined('stringescape', 'sqs')), + ('"', String.Double, combined('stringescape', 'dqs')) + ], + + 'funcname': [ + include('ws'), + (r'[.:]', Punctuation), + (r'%s(?=%s*[.:])' % (_name, _s), Name.Class), + (_name, Name.Function, '#pop'), + # inline function + (r'\(', Punctuation, '#pop'), + ], + + 'goto': [ + include('ws'), + (_name, Name.Label, '#pop'), + ], + + 'label': [ + include('ws'), + (r'::', Punctuation, '#pop'), + (_name, Name.Label), + ], + + 'stringescape': [ + (r'\\([abfnrtv\\"\']|[\r\n]{1,2}|z\s*|x[0-9a-fA-F]{2}|\d{1,3}|' + r'u\{[0-9a-fA-F]+\})', String.Escape), + ], + + 'sqs': [ + (r"'", String.Single, '#pop'), + (r"[^\\']+", String.Single), + ], + + 'dqs': [ + (r'"', String.Double, '#pop'), + (r'[^\\"]+', String.Double), + ] + } + + def __init__(self, **options): + self.func_name_highlighting = get_bool_opt( + options, 'func_name_highlighting', True) + self.disabled_modules = get_list_opt(options, 'disabled_modules', []) + + self._functions = set() + if self.func_name_highlighting: + from pygments.lexers._lua_builtins import MODULES + 
for mod, func in MODULES.items(): + if mod not in self.disabled_modules: + self._functions.update(func) + RegexLexer.__init__(self, **options) + + def get_tokens_unprocessed(self, text): + for index, token, value in \ + RegexLexer.get_tokens_unprocessed(self, text): + if token is Name: + if value in self._functions: + yield index, Name.Builtin, value + continue + elif '.' in value: + a, b = value.split('.') + yield index, Name, a + yield index + len(a), Punctuation, '.' + yield index + len(a) + 1, Name, b + continue + yield index, token, value + +class MoonScriptLexer(LuaLexer): + """ + For `MoonScript `_ source code. + + .. versionadded:: 1.5 + """ + + name = "MoonScript" + aliases = ["moon", "moonscript"] + filenames = ["*.moon"] + mimetypes = ['text/x-moonscript', 'application/x-moonscript'] + + tokens = { + 'root': [ + (r'#!(.*?)$', Comment.Preproc), + default('base'), + ], + 'base': [ + ('--.*$', Comment.Single), + (r'(?i)(\d*\.\d+|\d+\.\d*)(e[+-]?\d+)?', Number.Float), + (r'(?i)\d+e[+-]?\d+', Number.Float), + (r'(?i)0x[0-9a-f]*', Number.Hex), + (r'\d+', Number.Integer), + (r'\n', Text), + (r'[^\S\n]+', Text), + (r'(?s)\[(=*)\[.*?\]\1\]', String), + (r'(->|=>)', Name.Function), + (r':[a-zA-Z_]\w*', Name.Variable), + (r'(==|!=|~=|<=|>=|\.\.\.|\.\.|[=+\-*/%^<>#!.\\:])', Operator), + (r'[;,]', Punctuation), + (r'[\[\]{}()]', Keyword.Type), + (r'[a-zA-Z_]\w*:', Name.Variable), + (words(( + 'class', 'extends', 'if', 'then', 'super', 'do', 'with', + 'import', 'export', 'while', 'elseif', 'return', 'for', 'in', + 'from', 'when', 'using', 'else', 'and', 'or', 'not', 'switch', + 'break'), suffix=r'\b'), + Keyword), + (r'(true|false|nil)\b', Keyword.Constant), + (r'(and|or|not)\b', Operator.Word), + (r'(self)\b', Name.Builtin.Pseudo), + (r'@@?([a-zA-Z_]\w*)?', Name.Variable.Class), + (r'[A-Z]\w*', Name.Class), # proper name + (r'[A-Za-z_]\w*(\.[A-Za-z_]\w*)?', Name), + ("'", String.Single, combined('stringescape', 'sqs')), + ('"', String.Double, combined('stringescape', 'dqs')) + ], + 'stringescape': [ + (r'''\\([abfnrtv\\"']|\d{1,3})''', String.Escape) + ], + 'sqs': [ + ("'", String.Single, '#pop'), + (".", String) + ], + 'dqs': [ + ('"', String.Double, '#pop'), + (".", String) + ] + } + + def get_tokens_unprocessed(self, text): + # set . as Operator instead of Punctuation + for index, token, value in LuaLexer.get_tokens_unprocessed(self, text): + if token == Punctuation and value == ".": + token = Operator + yield index, token, value + + +class ChaiscriptLexer(RegexLexer): + """ + For `ChaiScript `_ source code. + + .. versionadded:: 2.0 + """ + + name = 'ChaiScript' + aliases = ['chai', 'chaiscript'] + filenames = ['*.chai'] + mimetypes = ['text/x-chaiscript', 'application/x-chaiscript'] + + flags = re.DOTALL | re.MULTILINE + + tokens = { + 'commentsandwhitespace': [ + (r'\s+', Text), + (r'//.*?\n', Comment.Single), + (r'/\*.*?\*/', Comment.Multiline), + (r'^\#.*?\n', Comment.Single) + ], + 'slashstartsregex': [ + include('commentsandwhitespace'), + (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/' + r'([gim]+\b|\B)', String.Regex, '#pop'), + (r'(?=/)', Text, ('#pop', 'badregex')), + default('#pop') + ], + 'badregex': [ + (r'\n', Text, '#pop') + ], + 'root': [ + include('commentsandwhitespace'), + (r'\n', Text), + (r'[^\S\n]+', Text), + (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|\.\.' 
+ r'(<<|>>>?|==?|!=?|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'), + (r'[{(\[;,]', Punctuation, 'slashstartsregex'), + (r'[})\].]', Punctuation), + (r'[=+\-*/]', Operator), + (r'(for|in|while|do|break|return|continue|if|else|' + r'throw|try|catch' + r')\b', Keyword, 'slashstartsregex'), + (r'(var)\b', Keyword.Declaration, 'slashstartsregex'), + (r'(attr|def|fun)\b', Keyword.Reserved), + (r'(true|false)\b', Keyword.Constant), + (r'(eval|throw)\b', Name.Builtin), + (r'`\S+`', Name.Builtin), + (r'[$a-zA-Z_]\w*', Name.Other), + (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), + (r'0x[0-9a-fA-F]+', Number.Hex), + (r'[0-9]+', Number.Integer), + (r'"', String.Double, 'dqstring'), + (r"'(\\\\|\\'|[^'])*'", String.Single), + ], + 'dqstring': [ + (r'\$\{[^"}]+?\}', String.Interpol), + (r'\$', String.Double), + (r'\\\\', String.Double), + (r'\\"', String.Double), + (r'[^\\"$]+', String.Double), + (r'"', String.Double, '#pop'), + ], + } + + +class LSLLexer(RegexLexer): + """ + For Second Life's Linden Scripting Language source code. + + .. versionadded:: 2.0 + """ + + name = 'LSL' + aliases = ['lsl'] + filenames = ['*.lsl'] + mimetypes = ['text/x-lsl'] + + flags = re.MULTILINE + + lsl_keywords = r'\b(?:do|else|for|if|jump|return|while)\b' + lsl_types = r'\b(?:float|integer|key|list|quaternion|rotation|string|vector)\b' + lsl_states = r'\b(?:(?:state)\s+\w+|default)\b' + lsl_events = r'\b(?:state_(?:entry|exit)|touch(?:_(?:start|end))?|(?:land_)?collision(?:_(?:start|end))?|timer|listen|(?:no_)?sensor|control|(?:not_)?at_(?:rot_)?target|money|email|run_time_permissions|changed|attach|dataserver|moving_(?:start|end)|link_message|(?:on|object)_rez|remote_data|http_re(?:sponse|quest)|path_update|transaction_result)\b' + lsl_functions_builtin = r'\b(?:ll(?:ReturnObjectsBy(?:ID|Owner)|Json(?:2List|[GS]etValue|ValueType)|Sin|Cos|Tan|Atan2|Sqrt|Pow|Abs|Fabs|Frand|Floor|Ceil|Round|Vec(?:Mag|Norm|Dist)|Rot(?:Between|2(?:Euler|Fwd|Left|Up))|(?:Euler|Axes)2Rot|Whisper|(?:Region|Owner)?Say|Shout|Listen(?:Control|Remove)?|Sensor(?:Repeat|Remove)?|Detected(?:Name|Key|Owner|Type|Pos|Vel|Grab|Rot|Group|LinkNumber)|Die|Ground|Wind|(?:[GS]et)(?:AnimationOverride|MemoryLimit|PrimMediaParams|ParcelMusicURL|Object(?:Desc|Name)|PhysicsMaterial|Status|Scale|Color|Alpha|Texture|Pos|Rot|Force|Torque)|ResetAnimationOverride|(?:Scale|Offset|Rotate)Texture|(?:Rot)?Target(?:Remove)?|(?:Stop)?MoveToTarget|Apply(?:Rotational)?Impulse|Set(?:KeyframedMotion|ContentType|RegionPos|(?:Angular)?Velocity|Buoyancy|HoverHeight|ForceAndTorque|TimerEvent|ScriptState|Damage|TextureAnim|Sound(?:Queueing|Radius)|Vehicle(?:Type|(?:Float|Vector|Rotation)Param)|(?:Touch|Sit)?Text|Camera(?:Eye|At)Offset|PrimitiveParams|ClickAction|Link(?:Alpha|Color|PrimitiveParams(?:Fast)?|Texture(?:Anim)?|Camera|Media)|RemoteScriptAccessPin|PayPrice|LocalRot)|ScaleByFactor|Get(?:(?:Max|Min)ScaleFactor|ClosestNavPoint|StaticPath|SimStats|Env|PrimitiveParams|Link(?:PrimitiveParams|Number(?:OfSides)?|Key|Name|Media)|HTTPHeader|FreeURLs|Object(?:Details|PermMask|PrimCount)|Parcel(?:MaxPrims|Details|Prim(?:Count|Owners))|Attached|(?:SPMax|Free|Used)Memory|Region(?:Name|TimeDilation|FPS|Corner|AgentCount)|Root(?:Position|Rotation)|UnixTime|(?:Parcel|Region)Flags|(?:Wall|GMT)clock|SimulatorHostname|BoundingBox|GeometricCenter|Creator|NumberOf(?:Prims|NotecardLines|Sides)|Animation(?:List)?|(?:Camera|Local)(?:Pos|Rot)|Vel|Accel|Omega|Time(?:stamp|OfDay)|(?:Object|CenterOf)?Mass|MassMKS|Energy|Owner|(?:Owner)?Key|SunDirection|Texture(?:Offset|Scale|Rot)|Inventory
(?:Number|Name|Key|Type|Creator|PermMask)|Permissions(?:Key)?|StartParameter|List(?:Length|EntryType)|Date|Agent(?:Size|Info|Language|List)|LandOwnerAt|NotecardLine|Script(?:Name|State))|(?:Get|Reset|GetAndReset)Time|PlaySound(?:Slave)?|LoopSound(?:Master|Slave)?|(?:Trigger|Stop|Preload)Sound|(?:(?:Get|Delete)Sub|Insert)String|To(?:Upper|Lower)|Give(?:InventoryList|Money)|RezObject|(?:Stop)?LookAt|Sleep|CollisionFilter|(?:Take|Release)Controls|DetachFromAvatar|AttachToAvatar(?:Temp)?|InstantMessage|(?:GetNext)?Email|StopHover|MinEventDelay|RotLookAt|String(?:Length|Trim)|(?:Start|Stop)Animation|TargetOmega|RequestPermissions|(?:Create|Break)Link|BreakAllLinks|(?:Give|Remove)Inventory|Water|PassTouches|Request(?:Agent|Inventory)Data|TeleportAgent(?:Home|GlobalCoords)?|ModifyLand|CollisionSound|ResetScript|MessageLinked|PushObject|PassCollisions|AxisAngle2Rot|Rot2(?:Axis|Angle)|A(?:cos|sin)|AngleBetween|AllowInventoryDrop|SubStringIndex|List2(?:CSV|Integer|Json|Float|String|Key|Vector|Rot|List(?:Strided)?)|DeleteSubList|List(?:Statistics|Sort|Randomize|(?:Insert|Find|Replace)List)|EdgeOfWorld|AdjustSoundVolume|Key2Name|TriggerSoundLimited|EjectFromLand|(?:CSV|ParseString)2List|OverMyLand|SameGroup|UnSit|Ground(?:Slope|Normal|Contour)|GroundRepel|(?:Set|Remove)VehicleFlags|(?:AvatarOn)?(?:Link)?SitTarget|Script(?:Danger|Profiler)|Dialog|VolumeDetect|ResetOtherScript|RemoteLoadScriptPin|(?:Open|Close)RemoteDataChannel|SendRemoteData|RemoteDataReply|(?:Integer|String)ToBase64|XorBase64|Log(?:10)?|Base64To(?:String|Integer)|ParseStringKeepNulls|RezAtRoot|RequestSimulatorData|ForceMouselook|(?:Load|Release|(?:E|Une)scape)URL|ParcelMedia(?:CommandList|Query)|ModPow|MapDestination|(?:RemoveFrom|AddTo|Reset)Land(?:Pass|Ban)List|(?:Set|Clear)CameraParams|HTTP(?:Request|Response)|TextBox|DetectedTouch(?:UV|Face|Pos|(?:N|Bin)ormal|ST)|(?:MD5|SHA1|DumpList2)String|Request(?:Secure)?URL|Clear(?:Prim|Link)Media|(?:Link)?ParticleSystem|(?:Get|Request)(?:Username|DisplayName)|RegionSayTo|CastRay|GenerateKey|TransferLindenDollars|ManageEstateAccess|(?:Create|Delete)Character|ExecCharacterCmd|Evade|FleeFrom|NavigateTo|PatrolPoints|Pursue|UpdateCharacter|WanderWithin))\b' + lsl_constants_float = r'\b(?:DEG_TO_RAD|PI(?:_BY_TWO)?|RAD_TO_DEG|SQRT2|TWO_PI)\b' + lsl_constants_integer = 
r'\b(?:JSON_APPEND|STATUS_(?:PHYSICS|ROTATE_[XYZ]|PHANTOM|SANDBOX|BLOCK_GRAB(?:_OBJECT)?|(?:DIE|RETURN)_AT_EDGE|CAST_SHADOWS|OK|MALFORMED_PARAMS|TYPE_MISMATCH|BOUNDS_ERROR|NOT_(?:FOUND|SUPPORTED)|INTERNAL_ERROR|WHITELIST_FAILED)|AGENT(?:_(?:BY_(?:LEGACY_|USER)NAME|FLYING|ATTACHMENTS|SCRIPTED|MOUSELOOK|SITTING|ON_OBJECT|AWAY|WALKING|IN_AIR|TYPING|CROUCHING|BUSY|ALWAYS_RUN|AUTOPILOT|LIST_(?:PARCEL(?:_OWNER)?|REGION)))?|CAMERA_(?:PITCH|DISTANCE|BEHINDNESS_(?:ANGLE|LAG)|(?:FOCUS|POSITION)(?:_(?:THRESHOLD|LOCKED|LAG))?|FOCUS_OFFSET|ACTIVE)|ANIM_ON|LOOP|REVERSE|PING_PONG|SMOOTH|ROTATE|SCALE|ALL_SIDES|LINK_(?:ROOT|SET|ALL_(?:OTHERS|CHILDREN)|THIS)|ACTIVE|PASSIVE|SCRIPTED|CONTROL_(?:FWD|BACK|(?:ROT_)?(?:LEFT|RIGHT)|UP|DOWN|(?:ML_)?LBUTTON)|PERMISSION_(?:RETURN_OBJECTS|DEBIT|OVERRIDE_ANIMATIONS|SILENT_ESTATE_MANAGEMENT|TAKE_CONTROLS|TRIGGER_ANIMATION|ATTACH|CHANGE_LINKS|(?:CONTROL|TRACK)_CAMERA|TELEPORT)|INVENTORY_(?:TEXTURE|SOUND|OBJECT|SCRIPT|LANDMARK|CLOTHING|NOTECARD|BODYPART|ANIMATION|GESTURE|ALL|NONE)|CHANGED_(?:INVENTORY|COLOR|SHAPE|SCALE|TEXTURE|LINK|ALLOWED_DROP|OWNER|REGION(?:_START)?|TELEPORT|MEDIA)|OBJECT_(?:(?:PHYSICS|SERVER|STREAMING)_COST|UNKNOWN_DETAIL|CHARACTER_TIME|PHANTOM|PHYSICS|TEMP_ON_REZ|NAME|DESC|POS|PRIM_EQUIVALENCE|RETURN_(?:PARCEL(?:_OWNER)?|REGION)|ROO?T|VELOCITY|OWNER|GROUP|CREATOR|ATTACHED_POINT|RENDER_WEIGHT|PATHFINDING_TYPE|(?:RUNNING|TOTAL)_SCRIPT_COUNT|SCRIPT_(?:MEMORY|TIME))|TYPE_(?:INTEGER|FLOAT|STRING|KEY|VECTOR|ROTATION|INVALID)|(?:DEBUG|PUBLIC)_CHANNEL|ATTACH_(?:AVATAR_CENTER|CHEST|HEAD|BACK|PELVIS|MOUTH|CHIN|NECK|NOSE|BELLY|[LR](?:SHOULDER|HAND|FOOT|EAR|EYE|[UL](?:ARM|LEG)|HIP)|(?:LEFT|RIGHT)_PEC|HUD_(?:CENTER_[12]|TOP_(?:RIGHT|CENTER|LEFT)|BOTTOM(?:_(?:RIGHT|LEFT))?))|LAND_(?:LEVEL|RAISE|LOWER|SMOOTH|NOISE|REVERT)|DATA_(?:ONLINE|NAME|BORN|SIM_(?:POS|STATUS|RATING)|PAYINFO)|PAYMENT_INFO_(?:ON_FILE|USED)|REMOTE_DATA_(?:CHANNEL|REQUEST|REPLY)|PSYS_(?:PART_(?:BF_(?:ZERO|ONE(?:_MINUS_(?:DEST_COLOR|SOURCE_(ALPHA|COLOR)))?|DEST_COLOR|SOURCE_(ALPHA|COLOR))|BLEND_FUNC_(DEST|SOURCE)|FLAGS|(?:START|END)_(?:COLOR|ALPHA|SCALE|GLOW)|MAX_AGE|(?:RIBBON|WIND|INTERP_(?:COLOR|SCALE)|BOUNCE|FOLLOW_(?:SRC|VELOCITY)|TARGET_(?:POS|LINEAR)|EMISSIVE)_MASK)|SRC_(?:MAX_AGE|PATTERN|ANGLE_(?:BEGIN|END)|BURST_(?:RATE|PART_COUNT|RADIUS|SPEED_(?:MIN|MAX))|ACCEL|TEXTURE|TARGET_KEY|OMEGA|PATTERN_(?:DROP|EXPLODE|ANGLE(?:_CONE(?:_EMPTY)?)?)))|VEHICLE_(?:REFERENCE_FRAME|TYPE_(?:NONE|SLED|CAR|BOAT|AIRPLANE|BALLOON)|(?:LINEAR|ANGULAR)_(?:FRICTION_TIMESCALE|MOTOR_DIRECTION)|LINEAR_MOTOR_OFFSET|HOVER_(?:HEIGHT|EFFICIENCY|TIMESCALE)|BUOYANCY|(?:LINEAR|ANGULAR)_(?:DEFLECTION_(?:EFFICIENCY|TIMESCALE)|MOTOR_(?:DECAY_)?TIMESCALE)|VERTICAL_ATTRACTION_(?:EFFICIENCY|TIMESCALE)|BANKING_(?:EFFICIENCY|MIX|TIMESCALE)|FLAG_(?:NO_DEFLECTION_UP|LIMIT_(?:ROLL_ONLY|MOTOR_UP)|HOVER_(?:(?:WATER|TERRAIN|UP)_ONLY|GLOBAL_HEIGHT)|MOUSELOOK_(?:STEER|BANK)|CAMERA_DECOUPLED))|PRIM_(?:TYPE(?:_(?:BOX|CYLINDER|PRISM|SPHERE|TORUS|TUBE|RING|SCULPT))?|HOLE_(?:DEFAULT|CIRCLE|SQUARE|TRIANGLE)|MATERIAL(?:_(?:STONE|METAL|GLASS|WOOD|FLESH|PLASTIC|RUBBER))?|SHINY_(?:NONE|LOW|MEDIUM|HIGH)|BUMP_(?:NONE|BRIGHT|DARK|WOOD|BARK|BRICKS|CHECKER|CONCRETE|TILE|STONE|DISKS|GRAVEL|BLOBS|SIDING|LARGETILE|STUCCO|SUCTION|WEAVE)|TEXGEN_(?:DEFAULT|PLANAR)|SCULPT_(?:TYPE_(?:SPHERE|TORUS|PLANE|CYLINDER|MASK)|FLAG_(?:MIRROR|INVERT))|PHYSICS(?:_(?:SHAPE_(?:CONVEX|NONE|PRIM|TYPE)))?|(?:POS|ROT)_LOCAL|SLICE|TEXT|FLEXIBLE|POINT_LIGHT|TEMP_ON_REZ|PHANTOM|POSITION|SIZE|ROTATION|TEXTURE|NAME|OMEGA|DESC|LINK_TARGET|COLOR|BUMP_SHINY|FULLBRIGHT|TEXGEN|GLOW|MEDIA_(?:
ALT_IMAGE_ENABLE|CONTROLS|(?:CURRENT|HOME)_URL|AUTO_(?:LOOP|PLAY|SCALE|ZOOM)|FIRST_CLICK_INTERACT|(?:WIDTH|HEIGHT)_PIXELS|WHITELIST(?:_ENABLE)?|PERMS_(?:INTERACT|CONTROL)|PARAM_MAX|CONTROLS_(?:STANDARD|MINI)|PERM_(?:NONE|OWNER|GROUP|ANYONE)|MAX_(?:URL_LENGTH|WHITELIST_(?:SIZE|COUNT)|(?:WIDTH|HEIGHT)_PIXELS)))|MASK_(?:BASE|OWNER|GROUP|EVERYONE|NEXT)|PERM_(?:TRANSFER|MODIFY|COPY|MOVE|ALL)|PARCEL_(?:MEDIA_COMMAND_(?:STOP|PAUSE|PLAY|LOOP|TEXTURE|URL|TIME|AGENT|UNLOAD|AUTO_ALIGN|TYPE|SIZE|DESC|LOOP_SET)|FLAG_(?:ALLOW_(?:FLY|(?:GROUP_)?SCRIPTS|LANDMARK|TERRAFORM|DAMAGE|CREATE_(?:GROUP_)?OBJECTS)|USE_(?:ACCESS_(?:GROUP|LIST)|BAN_LIST|LAND_PASS_LIST)|LOCAL_SOUND_ONLY|RESTRICT_PUSHOBJECT|ALLOW_(?:GROUP|ALL)_OBJECT_ENTRY)|COUNT_(?:TOTAL|OWNER|GROUP|OTHER|SELECTED|TEMP)|DETAILS_(?:NAME|DESC|OWNER|GROUP|AREA|ID|SEE_AVATARS))|LIST_STAT_(?:MAX|MIN|MEAN|MEDIAN|STD_DEV|SUM(?:_SQUARES)?|NUM_COUNT|GEOMETRIC_MEAN|RANGE)|PAY_(?:HIDE|DEFAULT)|REGION_FLAG_(?:ALLOW_DAMAGE|FIXED_SUN|BLOCK_TERRAFORM|SANDBOX|DISABLE_(?:COLLISIONS|PHYSICS)|BLOCK_FLY|ALLOW_DIRECT_TELEPORT|RESTRICT_PUSHOBJECT)|HTTP_(?:METHOD|MIMETYPE|BODY_(?:MAXLENGTH|TRUNCATED)|CUSTOM_HEADER|PRAGMA_NO_CACHE|VERBOSE_THROTTLE|VERIFY_CERT)|STRING_(?:TRIM(?:_(?:HEAD|TAIL))?)|CLICK_ACTION_(?:NONE|TOUCH|SIT|BUY|PAY|OPEN(?:_MEDIA)?|PLAY|ZOOM)|TOUCH_INVALID_FACE|PROFILE_(?:NONE|SCRIPT_MEMORY)|RC_(?:DATA_FLAGS|DETECT_PHANTOM|GET_(?:LINK_NUM|NORMAL|ROOT_KEY)|MAX_HITS|REJECT_(?:TYPES|AGENTS|(?:NON)?PHYSICAL|LAND))|RCERR_(?:CAST_TIME_EXCEEDED|SIM_PERF_LOW|UNKNOWN)|ESTATE_ACCESS_(?:ALLOWED_(?:AGENT|GROUP)_(?:ADD|REMOVE)|BANNED_AGENT_(?:ADD|REMOVE))|DENSITY|FRICTION|RESTITUTION|GRAVITY_MULTIPLIER|KFM_(?:COMMAND|CMD_(?:PLAY|STOP|PAUSE|SET_MODE)|MODE|FORWARD|LOOP|PING_PONG|REVERSE|DATA|ROTATION|TRANSLATION)|ERR_(?:GENERIC|PARCEL_PERMISSIONS|MALFORMED_PARAMS|RUNTIME_PERMISSIONS|THROTTLED)|CHARACTER_(?:CMD_(?:(?:SMOOTH_)?STOP|JUMP)|DESIRED_(?:TURN_)?SPEED|RADIUS|STAY_WITHIN_PARCEL|LENGTH|ORIENTATION|ACCOUNT_FOR_SKIPPED_FRAMES|AVOIDANCE_MODE|TYPE(?:_(?:[A-D]|NONE))?|MAX_(?:DECEL|TURN_RADIUS|(?:ACCEL|SPEED)))|PURSUIT_(?:OFFSET|FUZZ_FACTOR|GOAL_TOLERANCE|INTERCEPT)|REQUIRE_LINE_OF_SIGHT|FORCE_DIRECT_PATH|VERTICAL|HORIZONTAL|AVOID_(?:CHARACTERS|DYNAMIC_OBSTACLES|NONE)|PU_(?:EVADE_(?:HIDDEN|SPOTTED)|FAILURE_(?:DYNAMIC_PATHFINDING_DISABLED|INVALID_(?:GOAL|START)|NO_(?:NAVMESH|VALID_DESTINATION)|OTHER|TARGET_GONE|(?:PARCEL_)?UNREACHABLE)|(?:GOAL|SLOWDOWN_DISTANCE)_REACHED)|TRAVERSAL_TYPE(?:_(?:FAST|NONE|SLOW))?|CONTENT_TYPE_(?:ATOM|FORM|HTML|JSON|LLSD|RSS|TEXT|XHTML|XML)|GCNP_(?:RADIUS|STATIC)|(?:PATROL|WANDER)_PAUSE_AT_WAYPOINTS|OPT_(?:AVATAR|CHARACTER|EXCLUSION_VOLUME|LEGACY_LINKSET|MATERIAL_VOLUME|OTHER|STATIC_OBSTACLE|WALKABLE)|SIM_STAT_PCT_CHARS_STEPPED)\b' + lsl_constants_integer_boolean = r'\b(?:FALSE|TRUE)\b' + lsl_constants_rotation = r'\b(?:ZERO_ROTATION)\b' + lsl_constants_string = r'\b(?:EOF|JSON_(?:ARRAY|DELETE|FALSE|INVALID|NULL|NUMBER|OBJECT|STRING|TRUE)|NULL_KEY|TEXTURE_(?:BLANK|DEFAULT|MEDIA|PLYWOOD|TRANSPARENT)|URL_REQUEST_(?:GRANTED|DENIED))\b' + lsl_constants_vector = r'\b(?:TOUCH_INVALID_(?:TEXCOORD|VECTOR)|ZERO_VECTOR)\b' + lsl_invalid_broken = r'\b(?:LAND_(?:LARGE|MEDIUM|SMALL)_BRUSH)\b' + lsl_invalid_deprecated = r'\b(?:ATTACH_[LR]PEC|DATA_RATING|OBJECT_ATTACHMENT_(?:GEOMETRY_BYTES|SURFACE_AREA)|PRIM_(?:CAST_SHADOWS|MATERIAL_LIGHT|TYPE_LEGACY)|PSYS_SRC_(?:INNER|OUTER)ANGLE|VEHICLE_FLAG_NO_FLY_UP|ll(?:Cloud|Make(?:Explosion|Fountain|Smoke|Fire)|RemoteDataSetRegion|Sound(?:Preload)?|XorBase64Strings(?:Correct)?))\b' + lsl_invalid_illegal = 
r'\b(?:event)\b' + lsl_invalid_unimplemented = r'\b(?:CHARACTER_(?:MAX_ANGULAR_(?:ACCEL|SPEED)|TURN_SPEED_MULTIPLIER)|PERMISSION_(?:CHANGE_(?:JOINTS|PERMISSIONS)|RELEASE_OWNERSHIP|REMAP_CONTROLS)|PRIM_PHYSICS_MATERIAL|PSYS_SRC_OBJ_REL_MASK|ll(?:CollisionSprite|(?:Stop)?PointAt|(?:(?:Refresh|Set)Prim)URL|(?:Take|Release)Camera|RemoteLoadScript))\b' + lsl_reserved_godmode = r'\b(?:ll(?:GodLikeRezObject|Set(?:Inventory|Object)PermMask))\b' + lsl_reserved_log = r'\b(?:print)\b' + lsl_operators = r'\+\+|\-\-|<<|>>|&&?|\|\|?|\^|~|[!%<>=*+\-/]=?' + + tokens = { + 'root': + [ + (r'//.*?\n', Comment.Single), + (r'/\*', Comment.Multiline, 'comment'), + (r'"', String.Double, 'string'), + (lsl_keywords, Keyword), + (lsl_types, Keyword.Type), + (lsl_states, Name.Class), + (lsl_events, Name.Builtin), + (lsl_functions_builtin, Name.Function), + (lsl_constants_float, Keyword.Constant), + (lsl_constants_integer, Keyword.Constant), + (lsl_constants_integer_boolean, Keyword.Constant), + (lsl_constants_rotation, Keyword.Constant), + (lsl_constants_string, Keyword.Constant), + (lsl_constants_vector, Keyword.Constant), + (lsl_invalid_broken, Error), + (lsl_invalid_deprecated, Error), + (lsl_invalid_illegal, Error), + (lsl_invalid_unimplemented, Error), + (lsl_reserved_godmode, Keyword.Reserved), + (lsl_reserved_log, Keyword.Reserved), + (r'\b([a-zA-Z_]\w*)\b', Name.Variable), + (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d*', Number.Float), + (r'(\d+\.\d*|\.\d+)', Number.Float), + (r'0[xX][0-9a-fA-F]+', Number.Hex), + (r'\d+', Number.Integer), + (lsl_operators, Operator), + (r':=?', Error), + (r'[,;{}()\[\]]', Punctuation), + (r'\n+', Whitespace), + (r'\s+', Whitespace) + ], + 'comment': + [ + (r'[^*/]+', Comment.Multiline), + (r'/\*', Comment.Multiline, '#push'), + (r'\*/', Comment.Multiline, '#pop'), + (r'[*/]', Comment.Multiline) + ], + 'string': + [ + (r'\\([nt"\\])', String.Escape), + (r'"', String.Double, '#pop'), + (r'\\.', Error), + (r'[^"\\]+', String.Double), + ] + } + + +class AppleScriptLexer(RegexLexer): + """ + For `AppleScript source code + `_, + including `AppleScript Studio + `_. + Contributed by Andreas Amann . + + .. 
versionadded:: 1.0 + """ + + name = 'AppleScript' + aliases = ['applescript'] + filenames = ['*.applescript'] + + flags = re.MULTILINE | re.DOTALL + + Identifiers = r'[a-zA-Z]\w*' + + # XXX: use words() for all of these + Literals = ('AppleScript', 'current application', 'false', 'linefeed', + 'missing value', 'pi', 'quote', 'result', 'return', 'space', + 'tab', 'text item delimiters', 'true', 'version') + Classes = ('alias ', 'application ', 'boolean ', 'class ', 'constant ', + 'date ', 'file ', 'integer ', 'list ', 'number ', 'POSIX file ', + 'real ', 'record ', 'reference ', 'RGB color ', 'script ', + 'text ', 'unit types', '(?:Unicode )?text', 'string') + BuiltIn = ('attachment', 'attribute run', 'character', 'day', 'month', + 'paragraph', 'word', 'year') + HandlerParams = ('about', 'above', 'against', 'apart from', 'around', + 'aside from', 'at', 'below', 'beneath', 'beside', + 'between', 'for', 'given', 'instead of', 'on', 'onto', + 'out of', 'over', 'since') + Commands = ('ASCII (character|number)', 'activate', 'beep', 'choose URL', + 'choose application', 'choose color', 'choose file( name)?', + 'choose folder', 'choose from list', + 'choose remote application', 'clipboard info', + 'close( access)?', 'copy', 'count', 'current date', 'delay', + 'delete', 'display (alert|dialog)', 'do shell script', + 'duplicate', 'exists', 'get eof', 'get volume settings', + 'info for', 'launch', 'list (disks|folder)', 'load script', + 'log', 'make', 'mount volume', 'new', 'offset', + 'open( (for access|location))?', 'path to', 'print', 'quit', + 'random number', 'read', 'round', 'run( script)?', + 'say', 'scripting components', + 'set (eof|the clipboard to|volume)', 'store script', + 'summarize', 'system attribute', 'system info', + 'the clipboard', 'time to GMT', 'write', 'quoted form') + References = ('(in )?back of', '(in )?front of', '[0-9]+(st|nd|rd|th)', + 'first', 'second', 'third', 'fourth', 'fifth', 'sixth', + 'seventh', 'eighth', 'ninth', 'tenth', 'after', 'back', + 'before', 'behind', 'every', 'front', 'index', 'last', + 'middle', 'some', 'that', 'through', 'thru', 'where', 'whose') + Operators = ("and", "or", "is equal", "equals", "(is )?equal to", "is not", + "isn't", "isn't equal( to)?", "is not equal( to)?", + "doesn't equal", "does not equal", "(is )?greater than", + "comes after", "is not less than or equal( to)?", + "isn't less than or equal( to)?", "(is )?less than", + "comes before", "is not greater than or equal( to)?", + "isn't greater than or equal( to)?", + "(is )?greater than or equal( to)?", "is not less than", + "isn't less than", "does not come before", + "doesn't come before", "(is )?less than or equal( to)?", + "is not greater than", "isn't greater than", + "does not come after", "doesn't come after", "starts? with", + "begins? with", "ends? 
with", "contains?", "does not contain", + "doesn't contain", "is in", "is contained by", "is not in", + "is not contained by", "isn't contained by", "div", "mod", + "not", "(a )?(ref( to)?|reference to)", "is", "does") + Control = ('considering', 'else', 'error', 'exit', 'from', 'if', + 'ignoring', 'in', 'repeat', 'tell', 'then', 'times', 'to', + 'try', 'until', 'using terms from', 'while', 'whith', + 'with timeout( of)?', 'with transaction', 'by', 'continue', + 'end', 'its?', 'me', 'my', 'return', 'of', 'as') + Declarations = ('global', 'local', 'prop(erty)?', 'set', 'get') + Reserved = ('but', 'put', 'returning', 'the') + StudioClasses = ('action cell', 'alert reply', 'application', 'box', + 'browser( cell)?', 'bundle', 'button( cell)?', 'cell', + 'clip view', 'color well', 'color-panel', + 'combo box( item)?', 'control', + 'data( (cell|column|item|row|source))?', 'default entry', + 'dialog reply', 'document', 'drag info', 'drawer', + 'event', 'font(-panel)?', 'formatter', + 'image( (cell|view))?', 'matrix', 'menu( item)?', 'item', + 'movie( view)?', 'open-panel', 'outline view', 'panel', + 'pasteboard', 'plugin', 'popup button', + 'progress indicator', 'responder', 'save-panel', + 'scroll view', 'secure text field( cell)?', 'slider', + 'sound', 'split view', 'stepper', 'tab view( item)?', + 'table( (column|header cell|header view|view))', + 'text( (field( cell)?|view))?', 'toolbar( item)?', + 'user-defaults', 'view', 'window') + StudioEvents = ('accept outline drop', 'accept table drop', 'action', + 'activated', 'alert ended', 'awake from nib', 'became key', + 'became main', 'begin editing', 'bounds changed', + 'cell value', 'cell value changed', 'change cell value', + 'change item value', 'changed', 'child of item', + 'choose menu item', 'clicked', 'clicked toolbar item', + 'closed', 'column clicked', 'column moved', + 'column resized', 'conclude drop', 'data representation', + 'deminiaturized', 'dialog ended', 'document nib name', + 'double clicked', 'drag( (entered|exited|updated))?', + 'drop', 'end editing', 'exposed', 'idle', 'item expandable', + 'item value', 'item value changed', 'items changed', + 'keyboard down', 'keyboard up', 'launched', + 'load data representation', 'miniaturized', 'mouse down', + 'mouse dragged', 'mouse entered', 'mouse exited', + 'mouse moved', 'mouse up', 'moved', + 'number of browser rows', 'number of items', + 'number of rows', 'open untitled', 'opened', 'panel ended', + 'parameters updated', 'plugin loaded', 'prepare drop', + 'prepare outline drag', 'prepare outline drop', + 'prepare table drag', 'prepare table drop', + 'read from file', 'resigned active', 'resigned key', + 'resigned main', 'resized( sub views)?', + 'right mouse down', 'right mouse dragged', + 'right mouse up', 'rows changed', 'scroll wheel', + 'selected tab view item', 'selection changed', + 'selection changing', 'should begin editing', + 'should close', 'should collapse item', + 'should end editing', 'should expand item', + 'should open( untitled)?', + 'should quit( after last window closed)?', + 'should select column', 'should select item', + 'should select row', 'should select tab view item', + 'should selection change', 'should zoom', 'shown', + 'update menu item', 'update parameters', + 'update toolbar item', 'was hidden', 'was miniaturized', + 'will become active', 'will close', 'will dismiss', + 'will display browser cell', 'will display cell', + 'will display item cell', 'will display outline cell', + 'will finish launching', 'will hide', 'will miniaturize', + 'will move', 
'will open', 'will pop up', 'will quit', + 'will resign active', 'will resize( sub views)?', + 'will select tab view item', 'will show', 'will zoom', + 'write to file', 'zoomed') + StudioCommands = ('animate', 'append', 'call method', 'center', + 'close drawer', 'close panel', 'display', + 'display alert', 'display dialog', 'display panel', 'go', + 'hide', 'highlight', 'increment', 'item for', + 'load image', 'load movie', 'load nib', 'load panel', + 'load sound', 'localized string', 'lock focus', 'log', + 'open drawer', 'path for', 'pause', 'perform action', + 'play', 'register', 'resume', 'scroll', 'select( all)?', + 'show', 'size to fit', 'start', 'step back', + 'step forward', 'stop', 'synchronize', 'unlock focus', + 'update') + StudioProperties = ('accepts arrow key', 'action method', 'active', + 'alignment', 'allowed identifiers', + 'allows branch selection', 'allows column reordering', + 'allows column resizing', 'allows column selection', + 'allows customization', + 'allows editing text attributes', + 'allows empty selection', 'allows mixed state', + 'allows multiple selection', 'allows reordering', + 'allows undo', 'alpha( value)?', 'alternate image', + 'alternate increment value', 'alternate title', + 'animation delay', 'associated file name', + 'associated object', 'auto completes', 'auto display', + 'auto enables items', 'auto repeat', + 'auto resizes( outline column)?', + 'auto save expanded items', 'auto save name', + 'auto save table columns', 'auto saves configuration', + 'auto scroll', 'auto sizes all columns to fit', + 'auto sizes cells', 'background color', 'bezel state', + 'bezel style', 'bezeled', 'border rect', 'border type', + 'bordered', 'bounds( rotation)?', 'box type', + 'button returned', 'button type', + 'can choose directories', 'can choose files', + 'can draw', 'can hide', + 'cell( (background color|size|type))?', 'characters', + 'class', 'click count', 'clicked( data)? column', + 'clicked data item', 'clicked( data)? row', + 'closeable', 'collating', 'color( (mode|panel))', + 'command key down', 'configuration', + 'content(s| (size|view( margins)?))?', 'context', + 'continuous', 'control key down', 'control size', + 'control tint', 'control view', + 'controller visible', 'coordinate system', + 'copies( on scroll)?', 'corner view', 'current cell', + 'current column', 'current( field)? editor', + 'current( menu)? item', 'current row', + 'current tab view item', 'data source', + 'default identifiers', 'delta (x|y|z)', + 'destination window', 'directory', 'display mode', + 'displayed cell', 'document( (edited|rect|view))?', + 'double value', 'dragged column', 'dragged distance', + 'dragged items', 'draws( cell)? background', + 'draws grid', 'dynamically scrolls', 'echos bullets', + 'edge', 'editable', 'edited( data)? column', + 'edited data item', 'edited( data)? 
row', 'enabled', + 'enclosing scroll view', 'ending page', + 'error handling', 'event number', 'event type', + 'excluded from windows menu', 'executable path', + 'expanded', 'fax number', 'field editor', 'file kind', + 'file name', 'file type', 'first responder', + 'first visible column', 'flipped', 'floating', + 'font( panel)?', 'formatter', 'frameworks path', + 'frontmost', 'gave up', 'grid color', 'has data items', + 'has horizontal ruler', 'has horizontal scroller', + 'has parent data item', 'has resize indicator', + 'has shadow', 'has sub menu', 'has vertical ruler', + 'has vertical scroller', 'header cell', 'header view', + 'hidden', 'hides when deactivated', 'highlights by', + 'horizontal line scroll', 'horizontal page scroll', + 'horizontal ruler view', 'horizontally resizable', + 'icon image', 'id', 'identifier', + 'ignores multiple clicks', + 'image( (alignment|dims when disabled|frame style|scaling))?', + 'imports graphics', 'increment value', + 'indentation per level', 'indeterminate', 'index', + 'integer value', 'intercell spacing', 'item height', + 'key( (code|equivalent( modifier)?|window))?', + 'knob thickness', 'label', 'last( visible)? column', + 'leading offset', 'leaf', 'level', 'line scroll', + 'loaded', 'localized sort', 'location', 'loop mode', + 'main( (bunde|menu|window))?', 'marker follows cell', + 'matrix mode', 'maximum( content)? size', + 'maximum visible columns', + 'menu( form representation)?', 'miniaturizable', + 'miniaturized', 'minimized image', 'minimized title', + 'minimum column width', 'minimum( content)? size', + 'modal', 'modified', 'mouse down state', + 'movie( (controller|file|rect))?', 'muted', 'name', + 'needs display', 'next state', 'next text', + 'number of tick marks', 'only tick mark values', + 'opaque', 'open panel', 'option key down', + 'outline table column', 'page scroll', 'pages across', + 'pages down', 'palette label', 'pane splitter', + 'parent data item', 'parent window', 'pasteboard', + 'path( (names|separator))?', 'playing', + 'plays every frame', 'plays selection only', 'position', + 'preferred edge', 'preferred type', 'pressure', + 'previous text', 'prompt', 'properties', + 'prototype cell', 'pulls down', 'rate', + 'released when closed', 'repeated', + 'requested print time', 'required file type', + 'resizable', 'resized column', 'resource path', + 'returns records', 'reuses columns', 'rich text', + 'roll over', 'row height', 'rulers visible', + 'save panel', 'scripts path', 'scrollable', + 'selectable( identifiers)?', 'selected cell', + 'selected( data)? columns?', 'selected data items?', + 'selected( data)? 
rows?', 'selected item identifier', + 'selection by rect', 'send action on arrow key', + 'sends action when done editing', 'separates columns', + 'separator item', 'sequence number', 'services menu', + 'shared frameworks path', 'shared support path', + 'sheet', 'shift key down', 'shows alpha', + 'shows state by', 'size( mode)?', + 'smart insert delete enabled', 'sort case sensitivity', + 'sort column', 'sort order', 'sort type', + 'sorted( data rows)?', 'sound', 'source( mask)?', + 'spell checking enabled', 'starting page', 'state', + 'string value', 'sub menu', 'super menu', 'super view', + 'tab key traverses cells', 'tab state', 'tab type', + 'tab view', 'table view', 'tag', 'target( printer)?', + 'text color', 'text container insert', + 'text container origin', 'text returned', + 'tick mark position', 'time stamp', + 'title(d| (cell|font|height|position|rect))?', + 'tool tip', 'toolbar', 'trailing offset', 'transparent', + 'treat packages as directories', 'truncated labels', + 'types', 'unmodified characters', 'update views', + 'use sort indicator', 'user defaults', + 'uses data source', 'uses ruler', + 'uses threaded animation', + 'uses title from previous column', 'value wraps', + 'version', + 'vertical( (line scroll|page scroll|ruler view))?', + 'vertically resizable', 'view', + 'visible( document rect)?', 'volume', 'width', 'window', + 'windows menu', 'wraps', 'zoomable', 'zoomed') + + tokens = { + 'root': [ + (r'\s+', Text), + (r'¬\n', String.Escape), + (r"'s\s+", Text), # This is a possessive, consider moving + (r'(--|#).*?$', Comment), + (r'\(\*', Comment.Multiline, 'comment'), + (r'[(){}!,.:]', Punctuation), + (r'(«)([^»]+)(»)', + bygroups(Text, Name.Builtin, Text)), + (r'\b((?:considering|ignoring)\s*)' + r'(application responses|case|diacriticals|hyphens|' + r'numeric strings|punctuation|white space)', + bygroups(Keyword, Name.Builtin)), + (r'(-|\*|\+|&|≠|>=?|<=?|=|≥|≤|/|÷|\^)', Operator), + (r"\b(%s)\b" % '|'.join(Operators), Operator.Word), + (r'^(\s*(?:on|end)\s+)' + r'(%s)' % '|'.join(StudioEvents[::-1]), + bygroups(Keyword, Name.Function)), + (r'^(\s*)(in|on|script|to)(\s+)', bygroups(Text, Keyword, Text)), + (r'\b(as )(%s)\b' % '|'.join(Classes), + bygroups(Keyword, Name.Class)), + (r'\b(%s)\b' % '|'.join(Literals), Name.Constant), + (r'\b(%s)\b' % '|'.join(Commands), Name.Builtin), + (r'\b(%s)\b' % '|'.join(Control), Keyword), + (r'\b(%s)\b' % '|'.join(Declarations), Keyword), + (r'\b(%s)\b' % '|'.join(Reserved), Name.Builtin), + (r'\b(%s)s?\b' % '|'.join(BuiltIn), Name.Builtin), + (r'\b(%s)\b' % '|'.join(HandlerParams), Name.Builtin), + (r'\b(%s)\b' % '|'.join(StudioProperties), Name.Attribute), + (r'\b(%s)s?\b' % '|'.join(StudioClasses), Name.Builtin), + (r'\b(%s)\b' % '|'.join(StudioCommands), Name.Builtin), + (r'\b(%s)\b' % '|'.join(References), Name.Builtin), + (r'"(\\\\|\\"|[^"])*"', String.Double), + (r'\b(%s)\b' % Identifiers, Name.Variable), + (r'[-+]?(\d+\.\d*|\d*\.\d+)(E[-+][0-9]+)?', Number.Float), + (r'[-+]?\d+', Number.Integer), + ], + 'comment': [ + (r'\(\*', Comment.Multiline, '#push'), + (r'\*\)', Comment.Multiline, '#pop'), + ('[^*(]+', Comment.Multiline), + ('[*(]', Comment.Multiline), + ], + } + + +class RexxLexer(RegexLexer): + """ + `Rexx `_ is a scripting language available for + a wide range of different platforms with its roots found on mainframe + systems. It is popular for I/O- and data based tasks and can act as glue + language to bind different applications together. + + .. 
versionadded:: 2.0
+    """
+    name = 'Rexx'
+    aliases = ['rexx', 'arexx']
+    filenames = ['*.rexx', '*.rex', '*.rx', '*.arexx']
+    mimetypes = ['text/x-rexx']
+    flags = re.IGNORECASE
+
+    tokens = {
+        'root': [
+            (r'\s', Whitespace),
+            (r'/\*', Comment.Multiline, 'comment'),
+            (r'"', String, 'string_double'),
+            (r"'", String, 'string_single'),
+            (r'[0-9]+(\.[0-9]+)?(e[+-]?[0-9])?', Number),
+            (r'([a-z_]\w*)(\s*)(:)(\s*)(procedure)\b',
+             bygroups(Name.Function, Whitespace, Operator, Whitespace,
+                      Keyword.Declaration)),
+            (r'([a-z_]\w*)(\s*)(:)',
+             bygroups(Name.Label, Whitespace, Operator)),
+            include('function'),
+            include('keyword'),
+            include('operator'),
+            (r'[a-z_]\w*', Text),
+        ],
+        'function': [
+            (words((
+                'abbrev', 'abs', 'address', 'arg', 'b2x', 'bitand', 'bitor', 'bitxor',
+                'c2d', 'c2x', 'center', 'charin', 'charout', 'chars', 'compare',
+                'condition', 'copies', 'd2c', 'd2x', 'datatype', 'date', 'delstr',
+                'delword', 'digits', 'errortext', 'form', 'format', 'fuzz', 'insert',
+                'lastpos', 'left', 'length', 'linein', 'lineout', 'lines', 'max',
+                'min', 'overlay', 'pos', 'queued', 'random', 'reverse', 'right', 'sign',
+                'sourceline', 'space', 'stream', 'strip', 'substr', 'subword', 'symbol',
+                'time', 'trace', 'translate', 'trunc', 'value', 'verify', 'word',
+                'wordindex', 'wordlength', 'wordpos', 'words', 'x2b', 'x2c', 'x2d',
+                'xrange'), suffix=r'(\s*)(\()'),
+             bygroups(Name.Builtin, Whitespace, Operator)),
+        ],
+        'keyword': [
+            (r'(address|arg|by|call|do|drop|else|end|exit|for|forever|if|'
+             r'interpret|iterate|leave|nop|numeric|off|on|options|parse|'
+             r'pull|push|queue|return|say|select|signal|to|then|trace|until|'
+             r'while)\b', Keyword.Reserved),
+        ],
+        'operator': [
+            (r'(-|//|/|\(|\)|\*\*|\*|\\<<|\\<|\\==|\\=|\\>>|\\>|\\|\|\||\||'
+             r'&&|&|%|\+|<<=|<<|<=|<>|<|==|=|><|>=|>>=|>>|>|¬<<|¬<|¬==|¬=|'
+             r'¬>>|¬>|¬|\.|,)', Operator),
+        ],
+        'string_double': [
+            (r'[^"\n]+', String),
+            (r'""', String),
+            (r'"', String, '#pop'),
+            (r'\n', Text, '#pop'),  # Stray linefeed also terminates strings.
+        ],
+        'string_single': [
+            (r'[^\'\n]', String),
+            (r'\'\'', String),
+            (r'\'', String, '#pop'),
+            (r'\n', Text, '#pop'),  # Stray linefeed also terminates strings.
+        ],
+        'comment': [
+            (r'[^*]+', Comment.Multiline),
+            (r'\*/', Comment.Multiline, '#pop'),
+            (r'\*', Comment.Multiline),
+        ]
+    }
+
+    _c = lambda s: re.compile(s, re.MULTILINE)
+    _ADDRESS_COMMAND_PATTERN = _c(r'^\s*address\s+command\b')
+    _ADDRESS_PATTERN = _c(r'^\s*address\s+')
+    _DO_WHILE_PATTERN = _c(r'^\s*do\s+while\b')
+    _IF_THEN_DO_PATTERN = _c(r'^\s*if\b.+\bthen\s+do\s*$')
+    _PROCEDURE_PATTERN = _c(r'^\s*([a-z_]\w*)(\s*)(:)(\s*)(procedure)\b')
+    _ELSE_DO_PATTERN = _c(r'\belse\s+do\s*$')
+    _PARSE_ARG_PATTERN = _c(r'^\s*parse\s+(upper\s+)?(arg|value)\b')
+    PATTERNS_AND_WEIGHTS = (
+        (_ADDRESS_COMMAND_PATTERN, 0.2),
+        (_ADDRESS_PATTERN, 0.05),
+        (_DO_WHILE_PATTERN, 0.1),
+        (_ELSE_DO_PATTERN, 0.1),
+        (_IF_THEN_DO_PATTERN, 0.1),
+        (_PROCEDURE_PATTERN, 0.5),
+        (_PARSE_ARG_PATTERN, 0.2),
+    )
+
+    def analyse_text(text):
+        """
+        Check for initial comment and patterns that distinguish Rexx from
+        other C-like languages.
+        """
+        if re.search(r'/\*\**\s*rexx', text, re.IGNORECASE):
+            # Header matches MVS Rexx requirements, this is certainly a Rexx
+            # script.
+            return 1.0
+        elif text.startswith('/*'):
+            # Header matches general Rexx requirements; the source code might
+            # still be any language using C comments such as C++, C# or Java.
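+            # Sum the weights of every Rexx-specific pattern that occurs at
+            # least once; the 0.01 base keeps a plain C-style header above
+            # zero.  For example (illustrative), a source containing
+            # 'parse arg' (0.2) and a 'name: procedure' label (0.5) scores
+            # 0.2 + 0.5 + 0.01 = 0.71.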
+ lowerText = text.lower() + result = sum(weight + for (pattern, weight) in RexxLexer.PATTERNS_AND_WEIGHTS + if pattern.search(lowerText)) + 0.01 + return min(result, 1.0) + + +class MOOCodeLexer(RegexLexer): + """ + For `MOOCode `_ (the MOO scripting + language). + + .. versionadded:: 0.9 + """ + name = 'MOOCode' + filenames = ['*.moo'] + aliases = ['moocode', 'moo'] + mimetypes = ['text/x-moocode'] + + tokens = { + 'root': [ + # Numbers + (r'(0|[1-9][0-9_]*)', Number.Integer), + # Strings + (r'"(\\\\|\\"|[^"])*"', String), + # exceptions + (r'(E_PERM|E_DIV)', Name.Exception), + # db-refs + (r'((#[-0-9]+)|(\$\w+))', Name.Entity), + # Keywords + (r'\b(if|else|elseif|endif|for|endfor|fork|endfork|while' + r'|endwhile|break|continue|return|try' + r'|except|endtry|finally|in)\b', Keyword), + # builtins + (r'(random|length)', Name.Builtin), + # special variables + (r'(player|caller|this|args)', Name.Variable.Instance), + # skip whitespace + (r'\s+', Text), + (r'\n', Text), + # other operators + (r'([!;=,{}&|:.\[\]@()<>?]+)', Operator), + # function call + (r'(\w+)(\()', bygroups(Name.Function, Operator)), + # variables + (r'(\w+)', Text), + ] + } + + +class HybrisLexer(RegexLexer): + """ + For `Hybris `_ source code. + + .. versionadded:: 1.4 + """ + + name = 'Hybris' + aliases = ['hybris', 'hy'] + filenames = ['*.hy', '*.hyb'] + mimetypes = ['text/x-hybris', 'application/x-hybris'] + + flags = re.MULTILINE | re.DOTALL + + tokens = { + 'root': [ + # method names + (r'^(\s*(?:function|method|operator\s+)+?)' + r'([a-zA-Z_]\w*)' + r'(\s*)(\()', bygroups(Keyword, Name.Function, Text, Operator)), + (r'[^\S\n]+', Text), + (r'//.*?\n', Comment.Single), + (r'/\*.*?\*/', Comment.Multiline), + (r'@[a-zA-Z_][\w.]*', Name.Decorator), + (r'(break|case|catch|next|default|do|else|finally|for|foreach|of|' + r'unless|if|new|return|switch|me|throw|try|while)\b', Keyword), + (r'(extends|private|protected|public|static|throws|function|method|' + r'operator)\b', Keyword.Declaration), + (r'(true|false|null|__FILE__|__LINE__|__VERSION__|__LIB_PATH__|' + r'__INC_PATH__)\b', Keyword.Constant), + (r'(class|struct)(\s+)', + bygroups(Keyword.Declaration, Text), 'class'), + (r'(import|include)(\s+)', + bygroups(Keyword.Namespace, Text), 'import'), + (words(( + 'gc_collect', 'gc_mm_items', 'gc_mm_usage', 'gc_collect_threshold', + 'urlencode', 'urldecode', 'base64encode', 'base64decode', 'sha1', 'crc32', + 'sha2', 'md5', 'md5_file', 'acos', 'asin', 'atan', 'atan2', 'ceil', 'cos', + 'cosh', 'exp', 'fabs', 'floor', 'fmod', 'log', 'log10', 'pow', 'sin', + 'sinh', 'sqrt', 'tan', 'tanh', 'isint', 'isfloat', 'ischar', 'isstring', + 'isarray', 'ismap', 'isalias', 'typeof', 'sizeof', 'toint', 'tostring', + 'fromxml', 'toxml', 'binary', 'pack', 'load', 'eval', 'var_names', + 'var_values', 'user_functions', 'dyn_functions', 'methods', 'call', + 'call_method', 'mknod', 'mkfifo', 'mount', 'umount2', 'umount', 'ticks', + 'usleep', 'sleep', 'time', 'strtime', 'strdate', 'dllopen', 'dlllink', + 'dllcall', 'dllcall_argv', 'dllclose', 'env', 'exec', 'fork', 'getpid', + 'wait', 'popen', 'pclose', 'exit', 'kill', 'pthread_create', + 'pthread_create_argv', 'pthread_exit', 'pthread_join', 'pthread_kill', + 'smtp_send', 'http_get', 'http_post', 'http_download', 'socket', 'bind', + 'listen', 'accept', 'getsockname', 'getpeername', 'settimeout', 'connect', + 'server', 'recv', 'send', 'close', 'print', 'println', 'printf', 'input', + 'readline', 'serial_open', 'serial_fcntl', 'serial_get_attr', + 'serial_get_ispeed', 'serial_get_ospeed', 
'serial_set_attr',
+                'serial_set_ispeed', 'serial_set_ospeed', 'serial_write', 'serial_read',
+                'serial_close', 'xml_load', 'xml_parse', 'fopen', 'fseek', 'ftell',
+                'fsize', 'fread', 'fwrite', 'fgets', 'fclose', 'file', 'readdir',
+                'pcre_replace', 'size', 'pop', 'unmap', 'has', 'keys', 'values',
+                'length', 'find', 'substr', 'replace', 'split', 'trim', 'remove',
+                'contains', 'join'), suffix=r'\b'),
+             Name.Builtin),
+            (words((
+                'MethodReference', 'Runner', 'Dll', 'Thread', 'Pipe', 'Process',
+                'Runnable', 'CGI', 'ClientSocket', 'Socket', 'ServerSocket',
+                'File', 'Console', 'Directory', 'Exception'), suffix=r'\b'),
+             Keyword.Type),
+            (r'"(\\\\|\\"|[^"])*"', String),
+            (r"'\\.'|'[^\\]'|'\\u[0-9a-f]{4}'", String.Char),
+            (r'(\.)([a-zA-Z_]\w*)',
+             bygroups(Operator, Name.Attribute)),
+            (r'[a-zA-Z_]\w*:', Name.Label),
+            (r'[a-zA-Z_$]\w*', Name),
+            (r'[~^*!%&\[\](){}<>|+=:;,./?\-@]+', Operator),
+            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
+            (r'0x[0-9a-f]+', Number.Hex),
+            (r'[0-9]+L?', Number.Integer),
+            (r'\n', Text),
+        ],
+        'class': [
+            (r'[a-zA-Z_]\w*', Name.Class, '#pop')
+        ],
+        'import': [
+            (r'[\w.]+\*?', Name.Namespace, '#pop')
+        ],
+    }
+
+
+class EasytrieveLexer(RegexLexer):
+    """
+    Easytrieve Plus is a programming language for extracting, filtering and
+    converting sequential data. Furthermore, it can lay out data for reports.
+    It is mainly used on mainframe platforms and can access several of the
+    mainframe's native file formats. It is somewhat comparable to awk.
+
+    .. versionadded:: 2.1
+    """
+    name = 'Easytrieve'
+    aliases = ['easytrieve']
+    filenames = ['*.ezt', '*.mac']
+    mimetypes = ['text/x-easytrieve']
+    flags = 0
+
+    # Note: We cannot use r'\b' at the start and end of keywords because
+    # Easytrieve Plus delimiter characters are:
+    #
+    # * space ( )
+    # * apostrophe (')
+    # * period (.)
+    # * comma (,)
+    # * parentheses ( and )
+    # * colon (:)
+    #
+    # Additionally, a word ends once a '*' appears, indicating a comment.
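+    #
+    # For illustration, the character classes assembled below expand to
+    # roughly the following (\n stands for a newline):
+    #
+    #     _DELIMITER_PATTERN                  [ '.,():\n]
+    #     _NON_DELIMITER_OR_COMMENT_PATTERN   [^ '.,():\n*]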
+ _DELIMITERS = r' \'.,():\n' + _DELIMITERS_OR_COMENT = _DELIMITERS + '*' + _DELIMITER_PATTERN = '[' + _DELIMITERS + ']' + _DELIMITER_PATTERN_CAPTURE = '(' + _DELIMITER_PATTERN + ')' + _NON_DELIMITER_OR_COMMENT_PATTERN = '[^' + _DELIMITERS_OR_COMENT + ']' + _OPERATORS_PATTERN = '[.+\\-/=\\[\\](){}<>;,&%¬]' + _KEYWORDS = [ + 'AFTER-BREAK', 'AFTER-LINE', 'AFTER-SCREEN', 'AIM', 'AND', 'ATTR', + 'BEFORE', 'BEFORE-BREAK', 'BEFORE-LINE', 'BEFORE-SCREEN', 'BUSHU', + 'BY', 'CALL', 'CASE', 'CHECKPOINT', 'CHKP', 'CHKP-STATUS', 'CLEAR', + 'CLOSE', 'COL', 'COLOR', 'COMMIT', 'CONTROL', 'COPY', 'CURSOR', 'D', + 'DECLARE', 'DEFAULT', 'DEFINE', 'DELETE', 'DENWA', 'DISPLAY', 'DLI', + 'DO', 'DUPLICATE', 'E', 'ELSE', 'ELSE-IF', 'END', 'END-CASE', + 'END-DO', 'END-IF', 'END-PROC', 'ENDPAGE', 'ENDTABLE', 'ENTER', 'EOF', + 'EQ', 'ERROR', 'EXIT', 'EXTERNAL', 'EZLIB', 'F1', 'F10', 'F11', 'F12', + 'F13', 'F14', 'F15', 'F16', 'F17', 'F18', 'F19', 'F2', 'F20', 'F21', + 'F22', 'F23', 'F24', 'F25', 'F26', 'F27', 'F28', 'F29', 'F3', 'F30', + 'F31', 'F32', 'F33', 'F34', 'F35', 'F36', 'F4', 'F5', 'F6', 'F7', + 'F8', 'F9', 'FETCH', 'FILE-STATUS', 'FILL', 'FINAL', 'FIRST', + 'FIRST-DUP', 'FOR', 'GE', 'GET', 'GO', 'GOTO', 'GQ', 'GR', 'GT', + 'HEADING', 'HEX', 'HIGH-VALUES', 'IDD', 'IDMS', 'IF', 'IN', 'INSERT', + 'JUSTIFY', 'KANJI-DATE', 'KANJI-DATE-LONG', 'KANJI-TIME', 'KEY', + 'KEY-PRESSED', 'KOKUGO', 'KUN', 'LAST-DUP', 'LE', 'LEVEL', 'LIKE', + 'LINE', 'LINE-COUNT', 'LINE-NUMBER', 'LINK', 'LIST', 'LOW-VALUES', + 'LQ', 'LS', 'LT', 'MACRO', 'MASK', 'MATCHED', 'MEND', 'MESSAGE', + 'MOVE', 'MSTART', 'NE', 'NEWPAGE', 'NOMASK', 'NOPRINT', 'NOT', + 'NOTE', 'NOVERIFY', 'NQ', 'NULL', 'OF', 'OR', 'OTHERWISE', 'PA1', + 'PA2', 'PA3', 'PAGE-COUNT', 'PAGE-NUMBER', 'PARM-REGISTER', + 'PATH-ID', 'PATTERN', 'PERFORM', 'POINT', 'POS', 'PRIMARY', 'PRINT', + 'PROCEDURE', 'PROGRAM', 'PUT', 'READ', 'RECORD', 'RECORD-COUNT', + 'RECORD-LENGTH', 'REFRESH', 'RELEASE', 'RENUM', 'REPEAT', 'REPORT', + 'REPORT-INPUT', 'RESHOW', 'RESTART', 'RETRIEVE', 'RETURN-CODE', + 'ROLLBACK', 'ROW', 'S', 'SCREEN', 'SEARCH', 'SECONDARY', 'SELECT', + 'SEQUENCE', 'SIZE', 'SKIP', 'SOKAKU', 'SORT', 'SQL', 'STOP', 'SUM', + 'SYSDATE', 'SYSDATE-LONG', 'SYSIN', 'SYSIPT', 'SYSLST', 'SYSPRINT', + 'SYSSNAP', 'SYSTIME', 'TALLY', 'TERM-COLUMNS', 'TERM-NAME', + 'TERM-ROWS', 'TERMINATION', 'TITLE', 'TO', 'TRANSFER', 'TRC', + 'UNIQUE', 'UNTIL', 'UPDATE', 'UPPERCASE', 'USER', 'USERID', 'VALUE', + 'VERIFY', 'W', 'WHEN', 'WHILE', 'WORK', 'WRITE', 'X', 'XDM', 'XRST' + ] + + tokens = { + 'root': [ + (r'\*.*\n', Comment.Single), + (r'\n+', Whitespace), + # Macro argument + (r'&' + _NON_DELIMITER_OR_COMMENT_PATTERN + r'+\.', Name.Variable, + 'after_macro_argument'), + # Macro call + (r'%' + _NON_DELIMITER_OR_COMMENT_PATTERN + r'+', Name.Variable), + (r'(FILE|MACRO|REPORT)(\s+)', + bygroups(Keyword.Declaration, Whitespace), 'after_declaration'), + (r'(JOB|PARM)' + r'(' + _DELIMITER_PATTERN + r')', + bygroups(Keyword.Declaration, Operator)), + (words(_KEYWORDS, suffix=_DELIMITER_PATTERN_CAPTURE), + bygroups(Keyword.Reserved, Operator)), + (_OPERATORS_PATTERN, Operator), + # Procedure declaration + (r'(' + _NON_DELIMITER_OR_COMMENT_PATTERN + r'+)(\s*)(\.?)(\s*)(PROC)(\s*\n)', + bygroups(Name.Function, Whitespace, Operator, Whitespace, + Keyword.Declaration, Whitespace)), + (r'[0-9]+\.[0-9]*', Number.Float), + (r'[0-9]+', Number.Integer), + (r"'(''|[^'])*'", String), + (r'\s+', Whitespace), + # Everything else just belongs to a name + (_NON_DELIMITER_OR_COMMENT_PATTERN + r'+', Name), + ], + 
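+        # After a FILE, MACRO or REPORT declaration, the following word is
+        # the name being declared.
+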
'after_declaration': [
+            (_NON_DELIMITER_OR_COMMENT_PATTERN + r'+', Name.Function),
+            default('#pop'),
+        ],
+        'after_macro_argument': [
+            (r'\*.*\n', Comment.Single, '#pop'),
+            (r'\s+', Whitespace, '#pop'),
+            (_OPERATORS_PATTERN, Operator, '#pop'),
+            (r"'(''|[^'])*'", String, '#pop'),
+            # Everything else just belongs to a name
+            (_NON_DELIMITER_OR_COMMENT_PATTERN + r'+', Name),
+        ],
+    }
+    _COMMENT_LINE_REGEX = re.compile(r'^\s*\*')
+    _MACRO_HEADER_REGEX = re.compile(r'^\s*MACRO')
+
+    def analyse_text(text):
+        """
+        Perform a structural analysis for basic Easytrieve constructs.
+        """
+        result = 0.0
+        lines = text.split('\n')
+        hasEndProc = False
+        hasHeaderComment = False
+        hasFile = False
+        hasJob = False
+        hasProc = False
+        hasParm = False
+        hasReport = False
+
+        def isCommentLine(line):
+            return EasytrieveLexer._COMMENT_LINE_REGEX.match(line) is not None
+
+        def isEmptyLine(line):
+            return not bool(line.strip())
+
+        # Remove possible empty lines and header comments.
+        while lines and (isEmptyLine(lines[0]) or isCommentLine(lines[0])):
+            if not isEmptyLine(lines[0]):
+                hasHeaderComment = True
+            del lines[0]
+
+        if lines and EasytrieveLexer._MACRO_HEADER_REGEX.match(lines[0]):
+            # Looks like an Easytrieve macro.
+            result = 0.4
+            if hasHeaderComment:
+                result += 0.4
+        else:
+            # Scan the source for lines starting with indicators.
+            for line in lines:
+                words = line.split()
+                if len(words) >= 2:
+                    firstWord = words[0]
+                    if not hasReport:
+                        if not hasJob:
+                            if not hasFile:
+                                if not hasParm:
+                                    if firstWord == 'PARM':
+                                        hasParm = True
+                                if firstWord == 'FILE':
+                                    hasFile = True
+                            if firstWord == 'JOB':
+                                hasJob = True
+                        elif firstWord == 'PROC':
+                            hasProc = True
+                        elif firstWord == 'END-PROC':
+                            hasEndProc = True
+                        elif firstWord == 'REPORT':
+                            hasReport = True
+
+            # Weight the findings.
+            if hasJob and (hasProc == hasEndProc):
+                if hasHeaderComment:
+                    result += 0.1
+                if hasParm:
+                    if hasProc:
+                        # Found PARM, JOB and PROC/END-PROC:
+                        # pretty sure this is Easytrieve.
+                        result += 0.8
+                    else:
+                        # Found PARM and JOB: probably this is Easytrieve.
+                        result += 0.5
+            else:
+                # No complete JOB structure; weaker indicators might still
+                # point to Easytrieve.
+                result += 0.11
+                if hasParm:
+                    # Note: PARM is not a proper English word, so it is
+                    # regarded as a much better indicator for Easytrieve
+                    # than the other words.
+                    result += 0.2
+                if hasFile:
+                    result += 0.01
+                if hasReport:
+                    result += 0.01
+        assert 0.0 <= result <= 1.0
+        return result
+
+
+class JclLexer(RegexLexer):
+    """
+    `Job Control Language (JCL)
+    `_
+    is a scripting language used on mainframe platforms to instruct the system
+    on how to run a batch job or start a subsystem. It is somewhat
+    comparable to MS DOS batch and Unix shell scripts.
+
+    .. versionadded:: 2.1
+    """
+    name = 'JCL'
+    aliases = ['jcl']
+    filenames = ['*.jcl']
+    mimetypes = ['text/x-jcl']
+    flags = re.IGNORECASE
+
+    tokens = {
+        'root': [
+            (r'//\*.*\n', Comment.Single),
+            (r'//', Keyword.Pseudo, 'statement'),
+            (r'/\*', Keyword.Pseudo, 'jes2_statement'),
+            # TODO: JES3 statement
+            (r'.*\n', Other)  # Input text or inline code in any language.
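+            # A minimal job that the rules above dispatch on, for
+            # illustration (hypothetical names; IEFBR14 is IBM's classic
+            # do-nothing utility):
+            #
+            #     //MYJOB    JOB (ACCT),'J SMITH'
+            #     //STEP1    EXEC PGM=IEFBR14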
+ ], + 'statement': [ + (r'\s*\n', Whitespace, '#pop'), + (r'([a-z]\w*)(\s+)(exec|job)(\s*)', + bygroups(Name.Label, Whitespace, Keyword.Reserved, Whitespace), + 'option'), + (r'[a-z]\w*', Name.Variable, 'statement_command'), + (r'\s+', Whitespace, 'statement_command'), + ], + 'statement_command': [ + (r'\s+(command|cntl|dd|endctl|endif|else|include|jcllib|' + r'output|pend|proc|set|then|xmit)\s+', Keyword.Reserved, 'option'), + include('option') + ], + 'jes2_statement': [ + (r'\s*\n', Whitespace, '#pop'), + (r'\$', Keyword, 'option'), + (r'\b(jobparam|message|netacct|notify|output|priority|route|' + r'setup|signoff|xeq|xmit)\b', Keyword, 'option'), + ], + 'option': [ + # (r'\n', Text, 'root'), + (r'\*', Name.Builtin), + (r'[\[\](){}<>;,]', Punctuation), + (r'[-+*/=&%]', Operator), + (r'[a-z_]\w*', Name), + (r'\d+\.\d*', Number.Float), + (r'\.\d+', Number.Float), + (r'\d+', Number.Integer), + (r"'", String, 'option_string'), + (r'[ \t]+', Whitespace, 'option_comment'), + (r'\.', Punctuation), + ], + 'option_string': [ + (r"(\n)(//)", bygroups(Text, Keyword.Pseudo)), + (r"''", String), + (r"[^']", String), + (r"'", String, '#pop'), + ], + 'option_comment': [ + # (r'\n', Text, 'root'), + (r'.+', Comment.Single), + ] + } + + _JOB_HEADER_PATTERN = re.compile(r'^//[a-z#$@][a-z0-9#$@]{0,7}\s+job(\s+.*)?$', + re.IGNORECASE) + + def analyse_text(text): + """ + Recognize JCL job by header. + """ + result = 0.0 + lines = text.split('\n') + if len(lines) > 0: + if JclLexer._JOB_HEADER_PATTERN.match(lines[0]): + result = 1.0 + assert 0.0 <= result <= 1.0 + return result + + +class MiniScriptLexer(RegexLexer): + """ + For `MiniScript `_ source code. + + .. versionadded:: 2.6 + """ + + name = "MiniScript" + aliases = ["ms", "miniscript"] + filenames = ["*.ms"] + mimetypes = ['text/x-minicript', 'application/x-miniscript'] + + tokens = { + 'root': [ + (r'#!(.*?)$', Comment.Preproc), + default('base'), + ], + 'base': [ + ('//.*$', Comment.Single), + (r'(?i)(\d*\.\d+|\d+\.\d*)(e[+-]?\d+)?', Number), + (r'(?i)\d+e[+-]?\d+', Number), + (r'\d+', Number), + (r'\n', Text), + (r'[^\S\n]+', Text), + (r'"', String, 'string_double'), + (r'(==|!=|<=|>=|[=+\-*/%^<>.:])', Operator), + (r'[;,\[\]{}()]', Punctuation), + (words(( + 'break', 'continue', 'else', 'end', 'for', 'function', 'if', + 'in', 'isa', 'then', 'repeat', 'return', 'while'), suffix=r'\b'), + Keyword), + (words(( + 'abs', 'acos', 'asin', 'atan', 'ceil', 'char', 'cos', 'floor', + 'log', 'round', 'rnd', 'pi', 'sign', 'sin', 'sqrt', 'str', 'tan', + 'hasIndex', 'indexOf', 'len', 'val', 'code', 'remove', 'lower', + 'upper', 'replace', 'split', 'indexes', 'values', 'join', 'sum', + 'sort', 'shuffle', 'push', 'pop', 'pull', 'range', + 'print', 'input', 'time', 'wait', 'locals', 'globals', 'outer', + 'yield'), suffix=r'\b'), + Name.Builtin), + (r'(true|false|null)\b', Keyword.Constant), + (r'(and|or|not|new)\b', Operator.Word), + (r'(self|super|__isa)\b', Name.Builtin.Pseudo), + (r'[a-zA-Z_]\w*', Name.Variable) + ], + 'string_double': [ + (r'[^"\n]+', String), + (r'""', String), + (r'"', String, '#pop'), + (r'\n', Text, '#pop'), # Stray linefeed also terminates strings. + ] + } diff --git a/pygments/lexers/sgf.py b/pygments/lexers/sgf.py old mode 100644 new mode 100755 index fed864a..445e439 --- a/pygments/lexers/sgf.py +++ b/pygments/lexers/sgf.py @@ -1,61 +1,61 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.sgf - ~~~~~~~~~~~~~~~~~~~ - - Lexer for Smart Game Format (sgf) file format. - - :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. 
- :license: BSD, see LICENSE for details. -""" - -from pygments.lexer import RegexLexer, bygroups -from pygments.token import Name, Literal, String, Text, Punctuation - -__all__ = ["SmartGameFormatLexer"] - - -class SmartGameFormatLexer(RegexLexer): - """ - Lexer for Smart Game Format (sgf) file format. - - The format is used to store game records of board games for two players - (mainly Go game). - For more information about the definition of the format, see: - https://www.red-bean.com/sgf/ - - .. versionadded:: 2.4 - """ - name = 'SmartGameFormat' - aliases = ['sgf'] - filenames = ['*.sgf'] - - tokens = { - 'root': [ - (r'[\s():;]', Punctuation), - # tokens: - (r'(A[BW]|AE|AN|AP|AR|AS|[BW]L|BM|[BW]R|[BW]S|[BW]T|CA|CH|CP|CR|' - r'DD|DM|DO|DT|EL|EV|EX|FF|FG|G[BW]|GC|GM|GN|HA|HO|ID|IP|IT|IY|KM|' - r'KO|LB|LN|LT|L|MA|MN|M|N|OB|OM|ON|OP|OT|OV|P[BW]|PC|PL|PM|RE|RG|' - r'RO|RU|SO|SC|SE|SI|SL|SO|SQ|ST|SU|SZ|T[BW]|TC|TE|TM|TR|UC|US|VW|' - r'V|[BW]|C)', - Name.Builtin), - # number: - (r'(\[)([0-9.]+)(\])', - bygroups(Punctuation, Literal.Number, Punctuation)), - # date: - (r'(\[)([0-9]{4}-[0-9]{2}-[0-9]{2})(\])', - bygroups(Punctuation, Literal.Date, Punctuation)), - # point: - (r'(\[)([a-z]{2})(\])', - bygroups(Punctuation, String, Punctuation)), - # double points: - (r'(\[)([a-z]{2})(:)([a-z]{2})(\])', - bygroups(Punctuation, String, Punctuation, String, Punctuation)), - - (r'(\[)([\w\s#()+,\-.:?]+)(\])', - bygroups(Punctuation, String, Punctuation)), - (r'(\[)(\s.*)(\])', - bygroups(Punctuation, Text, Punctuation)), - ], - } +# -*- coding: utf-8 -*- +""" + pygments.lexers.sgf + ~~~~~~~~~~~~~~~~~~~ + + Lexer for Smart Game Format (sgf) file format. + + :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer, bygroups +from pygments.token import Name, Literal, String, Text, Punctuation + +__all__ = ["SmartGameFormatLexer"] + + +class SmartGameFormatLexer(RegexLexer): + """ + Lexer for Smart Game Format (sgf) file format. + + The format is used to store game records of board games for two players + (mainly Go game). + For more information about the definition of the format, see: + https://www.red-bean.com/sgf/ + + .. 
versionadded:: 2.4 + """ + name = 'SmartGameFormat' + aliases = ['sgf'] + filenames = ['*.sgf'] + + tokens = { + 'root': [ + (r'[\s():;]', Punctuation), + # tokens: + (r'(A[BW]|AE|AN|AP|AR|AS|[BW]L|BM|[BW]R|[BW]S|[BW]T|CA|CH|CP|CR|' + r'DD|DM|DO|DT|EL|EV|EX|FF|FG|G[BW]|GC|GM|GN|HA|HO|ID|IP|IT|IY|KM|' + r'KO|LB|LN|LT|L|MA|MN|M|N|OB|OM|ON|OP|OT|OV|P[BW]|PC|PL|PM|RE|RG|' + r'RO|RU|SO|SC|SE|SI|SL|SO|SQ|ST|SU|SZ|T[BW]|TC|TE|TM|TR|UC|US|VW|' + r'V|[BW]|C)', + Name.Builtin), + # number: + (r'(\[)([0-9.]+)(\])', + bygroups(Punctuation, Literal.Number, Punctuation)), + # date: + (r'(\[)([0-9]{4}-[0-9]{2}-[0-9]{2})(\])', + bygroups(Punctuation, Literal.Date, Punctuation)), + # point: + (r'(\[)([a-z]{2})(\])', + bygroups(Punctuation, String, Punctuation)), + # double points: + (r'(\[)([a-z]{2})(:)([a-z]{2})(\])', + bygroups(Punctuation, String, Punctuation, String, Punctuation)), + + (r'(\[)([\w\s#()+,\-.:?]+)(\])', + bygroups(Punctuation, String, Punctuation)), + (r'(\[)(\s.*)(\])', + bygroups(Punctuation, Text, Punctuation)), + ], + } diff --git a/pygments/lexers/shell.py b/pygments/lexers/shell.py old mode 100644 new mode 100755 index c12cb3f..6e30f65 --- a/pygments/lexers/shell.py +++ b/pygments/lexers/shell.py @@ -1,849 +1,909 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.shell - ~~~~~~~~~~~~~~~~~~~~~ - - Lexers for various shells. - - :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -import re - -from pygments.lexer import Lexer, RegexLexer, do_insertions, bygroups, \ - include, default, this, using, words -from pygments.token import Punctuation, \ - Text, Comment, Operator, Keyword, Name, String, Number, Generic -from pygments.util import shebang_matches - - -__all__ = ['BashLexer', 'BashSessionLexer', 'TcshLexer', 'BatchLexer', - 'SlurmBashLexer', 'MSDOSSessionLexer', 'PowerShellLexer', - 'PowerShellSessionLexer', 'TcshSessionLexer', 'FishShellLexer'] - -line_re = re.compile('.*?\n') - - -class BashLexer(RegexLexer): - """ - Lexer for (ba|k|z|)sh shell scripts. - - .. 
versionadded:: 0.6 - """ - - name = 'Bash' - aliases = ['bash', 'sh', 'ksh', 'zsh', 'shell'] - filenames = ['*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass', - '*.exheres-0', '*.exlib', '*.zsh', - '.bashrc', 'bashrc', '.bash_*', 'bash_*', 'zshrc', '.zshrc', - 'PKGBUILD'] - mimetypes = ['application/x-sh', 'application/x-shellscript', 'text/x-shellscript'] - - tokens = { - 'root': [ - include('basic'), - (r'`', String.Backtick, 'backticks'), - include('data'), - include('interp'), - ], - 'interp': [ - (r'\$\(\(', Keyword, 'math'), - (r'\$\(', Keyword, 'paren'), - (r'\$\{#?', String.Interpol, 'curly'), - (r'\$[a-zA-Z_]\w*', Name.Variable), # user variable - (r'\$(?:\d+|[#$?!_*@-])', Name.Variable), # builtin - (r'\$', Text), - ], - 'basic': [ - (r'\b(if|fi|else|while|do|done|for|then|return|function|case|' - r'select|continue|until|esac|elif)(\s*)\b', - bygroups(Keyword, Text)), - (r'\b(alias|bg|bind|break|builtin|caller|cd|command|compgen|' - r'complete|declare|dirs|disown|echo|enable|eval|exec|exit|' - r'export|false|fc|fg|getopts|hash|help|history|jobs|kill|let|' - r'local|logout|popd|printf|pushd|pwd|read|readonly|set|shift|' - r'shopt|source|suspend|test|time|times|trap|true|type|typeset|' - r'ulimit|umask|unalias|unset|wait)(?=[\s)`])', - Name.Builtin), - (r'\A#!.+\n', Comment.Hashbang), - (r'#.*\n', Comment.Single), - (r'\\[\w\W]', String.Escape), - (r'(\b\w+)(\s*)(\+?=)', bygroups(Name.Variable, Text, Operator)), - (r'[\[\]{}()=]', Operator), - (r'<<<', Operator), # here-string - (r'<<-?\s*(\'?)\\?(\w+)[\w\W]+?\2', String), - (r'&&|\|\|', Operator), - ], - 'data': [ - (r'(?s)\$?"(\\.|[^"\\$])*"', String.Double), - (r'"', String.Double, 'string'), - (r"(?s)\$'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single), - (r"(?s)'.*?'", String.Single), - (r';', Punctuation), - (r'&', Punctuation), - (r'\|', Punctuation), - (r'\s+', Text), - (r'\d+\b', Number), - (r'[^=\s\[\]{}()$"\'`\\<&|;]+', Text), - (r'<', Text), - ], - 'string': [ - (r'"', String.Double, '#pop'), - (r'(?s)(\\\\|\\[0-7]+|\\.|[^"\\$])+', String.Double), - include('interp'), - ], - 'curly': [ - (r'\}', String.Interpol, '#pop'), - (r':-', Keyword), - (r'\w+', Name.Variable), - (r'[^}:"\'`$\\]+', Punctuation), - (r':', Punctuation), - include('root'), - ], - 'paren': [ - (r'\)', Keyword, '#pop'), - include('root'), - ], - 'math': [ - (r'\)\)', Keyword, '#pop'), - (r'[-+*/%^|&]|\*\*|\|\|', Operator), - (r'\d+#\d+', Number), - (r'\d+#(?! )', Number), - (r'\d+', Number), - include('root'), - ], - 'backticks': [ - (r'`', String.Backtick, '#pop'), - include('root'), - ], - } - - def analyse_text(text): - if shebang_matches(text, r'(ba|z|)sh'): - return 1 - if text.startswith('$ '): - return 0.2 - - -class SlurmBashLexer(BashLexer): - """ - Lexer for (ba|k|z|)sh Slurm scripts. - - .. versionadded:: 2.4 - """ - - name = 'Slurm' - aliases = ['slurm', 'sbatch'] - filenames = ['*.sl'] - mimetypes = [] - EXTRA_KEYWORDS = {'srun'} - - def get_tokens_unprocessed(self, text): - for index, token, value in BashLexer.get_tokens_unprocessed(self, text): - if token is Text and value in self.EXTRA_KEYWORDS: - yield index, Name.Builtin, value - elif token is Comment.Single and 'SBATCH' in value: - yield index, Keyword.Pseudo, value - else: - yield index, token, value - -class ShellSessionBaseLexer(Lexer): - """ - Base lexer for simplistic shell sessions. - - .. 
versionadded:: 2.1 - """ - - _venv = re.compile(r'^(\([^)]*\))(\s*)') - - def get_tokens_unprocessed(self, text): - innerlexer = self._innerLexerCls(**self.options) - - pos = 0 - curcode = '' - insertions = [] - backslash_continuation = False - - for match in line_re.finditer(text): - line = match.group() - if backslash_continuation: - curcode += line - backslash_continuation = curcode.endswith('\\\n') - continue - - venv_match = self._venv.match(line) - if venv_match: - venv = venv_match.group(1) - venv_whitespace = venv_match.group(2) - insertions.append((len(curcode), - [(0, Generic.Prompt.VirtualEnv, venv)])) - if venv_whitespace: - insertions.append((len(curcode), - [(0, Text, venv_whitespace)])) - line = line[venv_match.end():] - - m = self._ps1rgx.match(line) - if m: - # To support output lexers (say diff output), the output - # needs to be broken by prompts whenever the output lexer - # changes. - if not insertions: - pos = match.start() - - insertions.append((len(curcode), - [(0, Generic.Prompt, m.group(1))])) - curcode += m.group(2) - backslash_continuation = curcode.endswith('\\\n') - elif line.startswith(self._ps2): - insertions.append((len(curcode), - [(0, Generic.Prompt, line[:len(self._ps2)])])) - curcode += line[len(self._ps2):] - backslash_continuation = curcode.endswith('\\\n') - else: - if insertions: - toks = innerlexer.get_tokens_unprocessed(curcode) - for i, t, v in do_insertions(insertions, toks): - yield pos+i, t, v - yield match.start(), Generic.Output, line - insertions = [] - curcode = '' - if insertions: - for i, t, v in do_insertions(insertions, - innerlexer.get_tokens_unprocessed(curcode)): - yield pos+i, t, v - - -class BashSessionLexer(ShellSessionBaseLexer): - """ - Lexer for simplistic shell sessions. - - .. versionadded:: 1.1 - """ - - name = 'Bash Session' - aliases = ['console', 'shell-session'] - filenames = ['*.sh-session', '*.shell-session'] - mimetypes = ['application/x-shell-session', 'application/x-sh-session'] - - _innerLexerCls = BashLexer - _ps1rgx = re.compile( - r'^((?:(?:\[.*?\])|(?:\(\S+\))?(?:| |sh\S*?|\w+\S+[@:]\S+(?:\s+\S+)' \ - r'?|\[\S+[@:][^\n]+\].+))\s*[$#%])(.*\n?)') - _ps2 = '>' - - -class BatchLexer(RegexLexer): - """ - Lexer for the DOS/Windows Batch file format. - - .. 
versionadded:: 0.7 - """ - name = 'Batchfile' - aliases = ['bat', 'batch', 'dosbatch', 'winbatch'] - filenames = ['*.bat', '*.cmd'] - mimetypes = ['application/x-dos-batch'] - - flags = re.MULTILINE | re.IGNORECASE - - _nl = r'\n\x1a' - _punct = r'&<>|' - _ws = r'\t\v\f\r ,;=\xa0' - _space = r'(?:(?:(?:\^[%s])?[%s])+)' % (_nl, _ws) - _keyword_terminator = (r'(?=(?:\^[%s]?)?[%s+./:[\\\]]|[%s%s(])' % - (_nl, _ws, _nl, _punct)) - _token_terminator = r'(?=\^?[%s]|[%s%s])' % (_ws, _punct, _nl) - _start_label = r'((?:(?<=^[^:])|^[^:]?)[%s]*)(:)' % _ws - _label = r'(?:(?:[^%s%s%s+:^]|\^[%s]?[\w\W])*)' % (_nl, _punct, _ws, _nl) - _label_compound = (r'(?:(?:[^%s%s%s+:^)]|\^[%s]?[^)])*)' % - (_nl, _punct, _ws, _nl)) - _number = r'(?:-?(?:0[0-7]+|0x[\da-f]+|\d+)%s)' % _token_terminator - _opword = r'(?:equ|geq|gtr|leq|lss|neq)' - _string = r'(?:"[^%s"]*(?:"|(?=[%s])))' % (_nl, _nl) - _variable = (r'(?:(?:%%(?:\*|(?:~[a-z]*(?:\$[^:]+:)?)?\d|' - r'[^%%:%s]+(?::(?:~(?:-?\d+)?(?:,(?:-?\d+)?)?|(?:[^%%%s^]|' - r'\^[^%%%s])[^=%s]*=(?:[^%%%s^]|\^[^%%%s])*)?)?%%))|' - r'(?:\^?![^!:%s]+(?::(?:~(?:-?\d+)?(?:,(?:-?\d+)?)?|(?:' - r'[^!%s^]|\^[^!%s])[^=%s]*=(?:[^!%s^]|\^[^!%s])*)?)?\^?!))' % - (_nl, _nl, _nl, _nl, _nl, _nl, _nl, _nl, _nl, _nl, _nl, _nl)) - _core_token = r'(?:(?:(?:\^[%s]?)?[^"%s%s%s])+)' % (_nl, _nl, _punct, _ws) - _core_token_compound = r'(?:(?:(?:\^[%s]?)?[^"%s%s%s)])+)' % (_nl, _nl, - _punct, _ws) - _token = r'(?:[%s]+|%s)' % (_punct, _core_token) - _token_compound = r'(?:[%s]+|%s)' % (_punct, _core_token_compound) - _stoken = (r'(?:[%s]+|(?:%s|%s|%s)+)' % - (_punct, _string, _variable, _core_token)) - - def _make_begin_state(compound, _core_token=_core_token, - _core_token_compound=_core_token_compound, - _keyword_terminator=_keyword_terminator, - _nl=_nl, _punct=_punct, _string=_string, - _space=_space, _start_label=_start_label, - _stoken=_stoken, _token_terminator=_token_terminator, - _variable=_variable, _ws=_ws): - rest = '(?:%s|%s|[^"%%%s%s%s])*' % (_string, _variable, _nl, _punct, - ')' if compound else '') - rest_of_line = r'(?:(?:[^%s^]|\^[%s]?[\w\W])*)' % (_nl, _nl) - rest_of_line_compound = r'(?:(?:[^%s^)]|\^[%s]?[^)])*)' % (_nl, _nl) - set_space = r'((?:(?:\^[%s]?)?[^\S\n])*)' % _nl - suffix = '' - if compound: - _keyword_terminator = r'(?:(?=\))|%s)' % _keyword_terminator - _token_terminator = r'(?:(?=\))|%s)' % _token_terminator - suffix = '/compound' - return [ - ((r'\)', Punctuation, '#pop') if compound else - (r'\)((?=\()|%s)%s' % (_token_terminator, rest_of_line), - Comment.Single)), - (r'(?=%s)' % _start_label, Text, 'follow%s' % suffix), - (_space, using(this, state='text')), - include('redirect%s' % suffix), - (r'[%s]+' % _nl, Text), - (r'\(', Punctuation, 'root/compound'), - (r'@+', Punctuation), - (r'((?:for|if|rem)(?:(?=(?:\^[%s]?)?/)|(?:(?!\^)|' - r'(?<=m))(?:(?=\()|%s)))(%s?%s?(?:\^[%s]?)?/(?:\^[%s]?)?\?)' % - (_nl, _token_terminator, _space, - _core_token_compound if compound else _core_token, _nl, _nl), - bygroups(Keyword, using(this, state='text')), - 'follow%s' % suffix), - (r'(goto%s)(%s(?:\^[%s]?)?/(?:\^[%s]?)?\?%s)' % - (_keyword_terminator, rest, _nl, _nl, rest), - bygroups(Keyword, using(this, state='text')), - 'follow%s' % suffix), - (words(('assoc', 'break', 'cd', 'chdir', 'cls', 'color', 'copy', - 'date', 'del', 'dir', 'dpath', 'echo', 'endlocal', 'erase', - 'exit', 'ftype', 'keys', 'md', 'mkdir', 'mklink', 'move', - 'path', 'pause', 'popd', 'prompt', 'pushd', 'rd', 'ren', - 'rename', 'rmdir', 'setlocal', 'shift', 'start', 'time', - 'title', 'type', 'ver', 
'verify', 'vol'), - suffix=_keyword_terminator), Keyword, 'follow%s' % suffix), - (r'(call)(%s?)(:)' % _space, - bygroups(Keyword, using(this, state='text'), Punctuation), - 'call%s' % suffix), - (r'call%s' % _keyword_terminator, Keyword), - (r'(for%s(?!\^))(%s)(/f%s)' % - (_token_terminator, _space, _token_terminator), - bygroups(Keyword, using(this, state='text'), Keyword), - ('for/f', 'for')), - (r'(for%s(?!\^))(%s)(/l%s)' % - (_token_terminator, _space, _token_terminator), - bygroups(Keyword, using(this, state='text'), Keyword), - ('for/l', 'for')), - (r'for%s(?!\^)' % _token_terminator, Keyword, ('for2', 'for')), - (r'(goto%s)(%s?)(:?)' % (_keyword_terminator, _space), - bygroups(Keyword, using(this, state='text'), Punctuation), - 'label%s' % suffix), - (r'(if(?:(?=\()|%s)(?!\^))(%s?)((?:/i%s)?)(%s?)((?:not%s)?)(%s?)' % - (_token_terminator, _space, _token_terminator, _space, - _token_terminator, _space), - bygroups(Keyword, using(this, state='text'), Keyword, - using(this, state='text'), Keyword, - using(this, state='text')), ('(?', 'if')), - (r'rem(((?=\()|%s)%s?%s?.*|%s%s)' % - (_token_terminator, _space, _stoken, _keyword_terminator, - rest_of_line_compound if compound else rest_of_line), - Comment.Single, 'follow%s' % suffix), - (r'(set%s)%s(/a)' % (_keyword_terminator, set_space), - bygroups(Keyword, using(this, state='text'), Keyword), - 'arithmetic%s' % suffix), - (r'(set%s)%s((?:/p)?)%s((?:(?:(?:\^[%s]?)?[^"%s%s^=%s]|' - r'\^[%s]?[^"=])+)?)((?:(?:\^[%s]?)?=)?)' % - (_keyword_terminator, set_space, set_space, _nl, _nl, _punct, - ')' if compound else '', _nl, _nl), - bygroups(Keyword, using(this, state='text'), Keyword, - using(this, state='text'), using(this, state='variable'), - Punctuation), - 'follow%s' % suffix), - default('follow%s' % suffix) - ] - - def _make_follow_state(compound, _label=_label, - _label_compound=_label_compound, _nl=_nl, - _space=_space, _start_label=_start_label, - _token=_token, _token_compound=_token_compound, - _ws=_ws): - suffix = '/compound' if compound else '' - state = [] - if compound: - state.append((r'(?=\))', Text, '#pop')) - state += [ - (r'%s([%s]*)(%s)(.*)' % - (_start_label, _ws, _label_compound if compound else _label), - bygroups(Text, Punctuation, Text, Name.Label, Comment.Single)), - include('redirect%s' % suffix), - (r'(?=[%s])' % _nl, Text, '#pop'), - (r'\|\|?|&&?', Punctuation, '#pop'), - include('text') - ] - return state - - def _make_arithmetic_state(compound, _nl=_nl, _punct=_punct, - _string=_string, _variable=_variable, _ws=_ws): - op = r'=+\-*/!~' - state = [] - if compound: - state.append((r'(?=\))', Text, '#pop')) - state += [ - (r'0[0-7]+', Number.Oct), - (r'0x[\da-f]+', Number.Hex), - (r'\d+', Number.Integer), - (r'[(),]+', Punctuation), - (r'([%s]|%%|\^\^)+' % op, Operator), - (r'(%s|%s|(\^[%s]?)?[^()%s%%^"%s%s%s]|\^[%s%s]?%s)+' % - (_string, _variable, _nl, op, _nl, _punct, _ws, _nl, _ws, - r'[^)]' if compound else r'[\w\W]'), - using(this, state='variable')), - (r'(?=[\x00|&])', Text, '#pop'), - include('follow') - ] - return state - - def _make_call_state(compound, _label=_label, - _label_compound=_label_compound): - state = [] - if compound: - state.append((r'(?=\))', Text, '#pop')) - state.append((r'(:?)(%s)' % (_label_compound if compound else _label), - bygroups(Punctuation, Name.Label), '#pop')) - return state - - def _make_label_state(compound, _label=_label, - _label_compound=_label_compound, _nl=_nl, - _punct=_punct, _string=_string, _variable=_variable): - state = [] - if compound: - 
state.append((r'(?=\))', Text, '#pop'))
- state.append((r'(%s?)((?:%s|%s|\^[%s]?%s|[^"%%^%s%s%s])*)' %
- (_label_compound if compound else _label, _string,
- _variable, _nl, r'[^)]' if compound else r'[\w\W]', _nl,
- _punct, r')' if compound else ''),
- bygroups(Name.Label, Comment.Single), '#pop'))
- return state
-
- def _make_redirect_state(compound,
- _core_token_compound=_core_token_compound,
- _nl=_nl, _punct=_punct, _stoken=_stoken,
- _string=_string, _space=_space,
- _variable=_variable, _ws=_ws):
- stoken_compound = (r'(?:[%s]+|(?:%s|%s|%s)+)' %
- (_punct, _string, _variable, _core_token_compound))
- return [
- (r'((?:(?<=[%s%s])\d)?)(>>?&|<&)([%s%s]*)(\d)' %
- (_nl, _ws, _nl, _ws),
- bygroups(Number.Integer, Punctuation, Text, Number.Integer)),
- (r'((?:(?<=[%s%s])(?<!\^[%s])\d)?)(>>?|<)(%s?%s)' %
- (_nl, _ws, _nl, _space, stoken_compound if compound else _stoken),
- bygroups(Number.Integer, Punctuation, using(this, state='text')))
- ]
-
- tokens = {
- 'root': _make_begin_state(False),
- 'follow': _make_follow_state(False),
- 'arithmetic': _make_arithmetic_state(False),
- 'call': _make_call_state(False),
- 'label': _make_label_state(False),
- 'redirect': _make_redirect_state(False),
- 'root/compound': _make_begin_state(True),
- 'follow/compound': _make_follow_state(True),
- 'arithmetic/compound': _make_arithmetic_state(True),
- 'call/compound': _make_call_state(True),
- 'label/compound': _make_label_state(True),
- 'redirect/compound': _make_redirect_state(True),
- 'variable-or-escape': [
- (_variable, Name.Variable),
- (r'%%%%|\^[%s]?(\^!|[\w\W])' % _nl, String.Escape)
- ],
- 'string': [
- (r'"', String.Double, '#pop'),
- (_variable, Name.Variable),
- (r'\^!|%%', String.Escape),
- (r'[^"%%^%s]+|[%%^]' % _nl, String.Double),
- default('#pop')
- ],
- 'sqstring': [
- include('variable-or-escape'),
- (r'[^%]+|%', String.Single)
- ],
- 'bqstring': [
- include('variable-or-escape'),
- (r'[^%]+|%', String.Backtick)
- ],
- 'text': [
- (r'"', String.Double, 'string'),
- include('variable-or-escape'),
- (r'[^"%%^%s%s%s\d)]+|.' % (_nl, _punct, _ws), Text)
- ],
- 'variable': [
- (r'"', String.Double, 'string'),
- include('variable-or-escape'),
- (r'[^"%%^%s]+|.' 
% _nl, Name.Variable) - ], - 'for': [ - (r'(%s)(in)(%s)(\()' % (_space, _space), - bygroups(using(this, state='text'), Keyword, - using(this, state='text'), Punctuation), '#pop'), - include('follow') - ], - 'for2': [ - (r'\)', Punctuation), - (r'(%s)(do%s)' % (_space, _token_terminator), - bygroups(using(this, state='text'), Keyword), '#pop'), - (r'[%s]+' % _nl, Text), - include('follow') - ], - 'for/f': [ - (r'(")((?:%s|[^"])*?")([%s%s]*)(\))' % (_variable, _nl, _ws), - bygroups(String.Double, using(this, state='string'), Text, - Punctuation)), - (r'"', String.Double, ('#pop', 'for2', 'string')), - (r"('(?:%%%%|%s|[\w\W])*?')([%s%s]*)(\))" % (_variable, _nl, _ws), - bygroups(using(this, state='sqstring'), Text, Punctuation)), - (r'(`(?:%%%%|%s|[\w\W])*?`)([%s%s]*)(\))' % (_variable, _nl, _ws), - bygroups(using(this, state='bqstring'), Text, Punctuation)), - include('for2') - ], - 'for/l': [ - (r'-?\d+', Number.Integer), - include('for2') - ], - 'if': [ - (r'((?:cmdextversion|errorlevel)%s)(%s)(\d+)' % - (_token_terminator, _space), - bygroups(Keyword, using(this, state='text'), - Number.Integer), '#pop'), - (r'(defined%s)(%s)(%s)' % (_token_terminator, _space, _stoken), - bygroups(Keyword, using(this, state='text'), - using(this, state='variable')), '#pop'), - (r'(exist%s)(%s%s)' % (_token_terminator, _space, _stoken), - bygroups(Keyword, using(this, state='text')), '#pop'), - (r'(%s%s)(%s)(%s%s)' % (_number, _space, _opword, _space, _number), - bygroups(using(this, state='arithmetic'), Operator.Word, - using(this, state='arithmetic')), '#pop'), - (_stoken, using(this, state='text'), ('#pop', 'if2')), - ], - 'if2': [ - (r'(%s?)(==)(%s?%s)' % (_space, _space, _stoken), - bygroups(using(this, state='text'), Operator, - using(this, state='text')), '#pop'), - (r'(%s)(%s)(%s%s)' % (_space, _opword, _space, _stoken), - bygroups(using(this, state='text'), Operator.Word, - using(this, state='text')), '#pop') - ], - '(?': [ - (_space, using(this, state='text')), - (r'\(', Punctuation, ('#pop', 'else?', 'root/compound')), - default('#pop') - ], - 'else?': [ - (_space, using(this, state='text')), - (r'else%s' % _token_terminator, Keyword, '#pop'), - default('#pop') - ] - } - - -class MSDOSSessionLexer(ShellSessionBaseLexer): - """ - Lexer for simplistic MSDOS sessions. - - .. versionadded:: 2.1 - """ - - name = 'MSDOS Session' - aliases = ['doscon'] - filenames = [] - mimetypes = [] - - _innerLexerCls = BatchLexer - _ps1rgx = re.compile(r'^([^>]*>)(.*\n?)') - _ps2 = 'More? ' - - -class TcshLexer(RegexLexer): - """ - Lexer for tcsh scripts. - - .. 
versionadded:: 0.10 - """ - - name = 'Tcsh' - aliases = ['tcsh', 'csh'] - filenames = ['*.tcsh', '*.csh'] - mimetypes = ['application/x-csh'] - - tokens = { - 'root': [ - include('basic'), - (r'\$\(', Keyword, 'paren'), - (r'\$\{#?', Keyword, 'curly'), - (r'`', String.Backtick, 'backticks'), - include('data'), - ], - 'basic': [ - (r'\b(if|endif|else|while|then|foreach|case|default|' - r'continue|goto|breaksw|end|switch|endsw)\s*\b', - Keyword), - (r'\b(alias|alloc|bg|bindkey|break|builtins|bye|caller|cd|chdir|' - r'complete|dirs|echo|echotc|eval|exec|exit|fg|filetest|getxvers|' - r'glob|getspath|hashstat|history|hup|inlib|jobs|kill|' - r'limit|log|login|logout|ls-F|migrate|newgrp|nice|nohup|notify|' - r'onintr|popd|printenv|pushd|rehash|repeat|rootnode|popd|pushd|' - r'set|shift|sched|setenv|setpath|settc|setty|setxvers|shift|' - r'source|stop|suspend|source|suspend|telltc|time|' - r'umask|unalias|uncomplete|unhash|universe|unlimit|unset|unsetenv|' - r'ver|wait|warp|watchlog|where|which)\s*\b', - Name.Builtin), - (r'#.*', Comment), - (r'\\[\w\W]', String.Escape), - (r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Text, Operator)), - (r'[\[\]{}()=]+', Operator), - (r'<<\s*(\'?)\\?(\w+)[\w\W]+?\2', String), - (r';', Punctuation), - ], - 'data': [ - (r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double), - (r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single), - (r'\s+', Text), - (r'[^=\s\[\]{}()$"\'`\\;#]+', Text), - (r'\d+(?= |\Z)', Number), - (r'\$#?(\w+|.)', Name.Variable), - ], - 'curly': [ - (r'\}', Keyword, '#pop'), - (r':-', Keyword), - (r'\w+', Name.Variable), - (r'[^}:"\'`$]+', Punctuation), - (r':', Punctuation), - include('root'), - ], - 'paren': [ - (r'\)', Keyword, '#pop'), - include('root'), - ], - 'backticks': [ - (r'`', String.Backtick, '#pop'), - include('root'), - ], - } - - -class TcshSessionLexer(ShellSessionBaseLexer): - """ - Lexer for Tcsh sessions. - - .. versionadded:: 2.1 - """ - - name = 'Tcsh Session' - aliases = ['tcshcon'] - filenames = [] - mimetypes = [] - - _innerLexerCls = TcshLexer - _ps1rgx = re.compile(r'^([^>]+>)(.*\n?)') - _ps2 = '? ' - - -class PowerShellLexer(RegexLexer): - """ - For Windows PowerShell code. - - .. versionadded:: 1.5 - """ - name = 'PowerShell' - aliases = ['powershell', 'posh', 'ps1', 'psm1'] - filenames = ['*.ps1', '*.psm1'] - mimetypes = ['text/x-powershell'] - - flags = re.DOTALL | re.IGNORECASE | re.MULTILINE - - keywords = ( - 'while validateset validaterange validatepattern validatelength ' - 'validatecount until trap switch return ref process param parameter in ' - 'if global: function foreach for finally filter end elseif else ' - 'dynamicparam do default continue cmdletbinding break begin alias \\? 
'
- '% #script #private #local #global mandatory parametersetname position '
- 'valuefrompipeline valuefrompipelinebypropertyname '
- 'valuefromremainingarguments helpmessage try catch throw').split()
-
- operators = (
- 'and as band bnot bor bxor casesensitive ccontains ceq cge cgt cle '
- 'clike clt cmatch cne cnotcontains cnotlike cnotmatch contains '
- 'creplace eq exact f file ge gt icontains ieq ige igt ile ilike ilt '
- 'imatch ine inotcontains inotlike inotmatch ireplace is isnot le like '
- 'lt match ne not notcontains notlike notmatch or regex replace '
- 'wildcard').split()
-
- verbs = (
- 'write where watch wait use update unregister unpublish unprotect '
- 'unlock uninstall undo unblock trace test tee take sync switch '
- 'suspend submit stop step start split sort skip show set send select '
- 'search scroll save revoke resume restore restart resolve resize '
- 'reset request repair rename remove register redo receive read push '
- 'publish protect pop ping out optimize open new move mount merge '
- 'measure lock limit join invoke install initialize import hide group '
- 'grant get format foreach find export expand exit enter enable edit '
- 'dismount disconnect disable deny debug cxnew copy convertto '
- 'convertfrom convert connect confirm compress complete compare close '
- 'clear checkpoint block backup assert approve aggregate add').split()
-
- aliases_ = (
- 'ac asnp cat cd cfs chdir clc clear clhy cli clp cls clv cnsn '
- 'compare copy cp cpi cpp curl cvpa dbp del diff dir dnsn ebp echo epal '
- 'epcsv epsn erase etsn exsn fc fhx fl foreach ft fw gal gbp gc gci gcm '
- 'gcs gdr ghy gi gjb gl gm gmo gp gps gpv group gsn gsnp gsv gu gv gwmi '
- 'h history icm iex ihy ii ipal ipcsv ipmo ipsn irm ise iwmi iwr kill lp '
- 'ls man md measure mi mount move mp mv nal ndr ni nmo npssc nsn nv ogv '
- 'oh popd ps pushd pwd r rbp rcjb rcsn rd rdr ren ri rjb rm rmdir rmo '
- 'rni rnp rp rsn rsnp rujb rv rvpa rwmi sajb sal saps sasv sbp sc select '
- 'set shcm si sl sleep sls sort sp spjb spps spsv start sujb sv swmi tee '
- 'trcm type wget where wjb write').split()
-
- commenthelp = (
- 'component description example externalhelp forwardhelpcategory '
- 'forwardhelptargetname functionality inputs link '
- 'notes outputs parameter remotehelprunspace role synopsis').split()
-
- tokens = {
- 'root': [
- # we need to count pairs of parentheses for correct highlight
- # of '$(...)' blocks in strings
- (r'\(', Punctuation, 'child'),
- (r'\s+', Text),
- (r'^(\s*#[#\s]*)(\.(?:%s))([^\n]*$)' % '|'.join(commenthelp),
- bygroups(Comment, String.Doc, Comment)),
- (r'#[^\n]*?$', Comment),
- (r'(&lt;|<)#', Comment.Multiline, 'multline'),
- (r'@"\n', String.Heredoc, 'heredoc-double'),
- (r"@'\n.*?\n'@", String.Heredoc),
- # escaped syntax
- (r'`[\'"$@-]', Punctuation),
- (r'"', String.Double, 'string'),
- (r"'([^']|'')*'", String.Single),
- (r'(\$|@@|@)((global|script|private|env):)?\w+',
- Name.Variable),
- (r'(%s)\b' % '|'.join(keywords), Keyword),
- (r'-(%s)\b' % '|'.join(operators), Operator),
- (r'(%s)-[a-z_]\w*\b' % '|'.join(verbs), Name.Builtin),
- (r'(%s)\s' % '|'.join(aliases_), Name.Builtin),
- (r'\[[a-z_\[][\w. `,\[\]]*\]', Name.Constant), # .net [type]s
- (r'-[a-z_]\w*', Name),
- (r'\w+', Name),
- (r'[.,;@{}\[\]$()=+*/\\&%!~?^`|<>-]|::', Punctuation),
- ],
- 'child': [
- (r'\)', Punctuation, '#pop'),
- include('root'),
- ],
- 'multline': [
- (r'[^#&.]+', Comment.Multiline),
- (r'#(&gt;|>)', Comment.Multiline, '#pop'),
- (r'\.(%s)' % '|'.join(commenthelp), String.Doc),
- (r'[#&.]', Comment.Multiline),
- ],
- 'string': [
- (r"`[0abfnrtv'\"$`]", String.Escape),
- (r'[^$`"]+', String.Double),
- (r'\$\(', Punctuation, 'child'),
- (r'""', String.Double),
- (r'[`$]', String.Double),
- (r'"', String.Double, '#pop'),
- ],
- 'heredoc-double': [
- (r'\n"@', String.Heredoc, '#pop'),
- (r'\$\(', Punctuation, 'child'),
- (r'[^@\n]+"]', String.Heredoc),
- (r".", String.Heredoc),
- ]
- }
-
-
-class PowerShellSessionLexer(ShellSessionBaseLexer):
- """
- Lexer for simplistic Windows PowerShell sessions.
-
- .. versionadded:: 2.1
- """
-
- name = 'PowerShell Session'
- aliases = ['ps1con']
- filenames = []
- mimetypes = []
-
- _innerLexerCls = PowerShellLexer
- _ps1rgx = re.compile(r'^(PS [^>]+> )(.*\n?)')
- _ps2 = '>> '
-
-
-class FishShellLexer(RegexLexer):
- """
- Lexer for Fish shell scripts.
-
- .. versionadded:: 2.1
- """
-
- name = 'Fish'
- aliases = ['fish', 'fishshell']
- filenames = ['*.fish', '*.load']
- mimetypes = ['application/x-fish']
-
- tokens = {
- 'root': [
- include('basic'),
- include('data'),
- include('interp'),
- ],
- 'interp': [
- (r'\$\(\(', Keyword, 'math'),
- (r'\(', Keyword, 'paren'),
- (r'\$#?(\w+|.)', Name.Variable),
- ],
- 'basic': [
- (r'\b(begin|end|if|else|while|break|for|in|return|function|block|'
- r'case|continue|switch|not|and|or|set|echo|exit|pwd|true|false|'
- r'cd|count|test)(\s*)\b',
- bygroups(Keyword, Text)),
- (r'\b(alias|bg|bind|breakpoint|builtin|command|commandline|'
- r'complete|contains|dirh|dirs|emit|eval|exec|fg|fish|fish_config|'
- r'fish_indent|fish_pager|fish_prompt|fish_right_prompt|'
- r'fish_update_completions|fishd|funced|funcsave|functions|help|'
- r'history|isatty|jobs|math|mimedb|nextd|open|popd|prevd|psub|'
- r'pushd|random|read|set_color|source|status|trap|type|ulimit|'
- r'umask|vared|fc|getopts|hash|kill|printf|time|wait)\s*\b(?!\.)',
- Name.Builtin),
- (r'#.*\n', Comment),
- (r'\\[\w\W]', String.Escape),
- (r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Text, Operator)),
- (r'[\[\]()=]', Operator),
- (r'<<-?\s*(\'?)\\?(\w+)[\w\W]+?\2', String),
- ],
- 'data': [
- (r'(?s)\$?"(\\\\|\\[0-7]+|\\.|[^"\\$])*"', String.Double),
- (r'"', String.Double, 'string'),
- (r"(?s)\$'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
- (r"(?s)'.*?'", String.Single),
- (r';', Punctuation),
- (r'&|\||\^|<|>', Operator),
- (r'\s+', Text),
- (r'\d+(?= |\Z)', Number),
- (r'[^=\s\[\]{}()$"\'`\\<&|;]+', Text),
- ],
- 'string': [
- (r'"', String.Double, '#pop'),
- (r'(?s)(\\\\|\\[0-7]+|\\.|[^"\\$])+', String.Double),
- include('interp'),
- ],
- 'paren': [
- (r'\)', Keyword, '#pop'),
- include('root'),
- ],
- 'math': [
- (r'\)\)', Keyword, '#pop'),
- (r'[-+*/%^|&]|\*\*|\|\|', Operator),
- (r'\d+#\d+', Number),
- (r'\d+#(?! )', Number),
- (r'\d+', Number),
- include('root'),
- ],
- }
+# -*- coding: utf-8 -*-
+"""
+ pygments.lexers.shell
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for various shells.
+
+ :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
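
A usage note for the session lexers this module defines: each pairs a prompt regex with an inner lexer, re-lexing command text after the prompt and passing everything else through as output. A minimal sketch, assuming Pygments is installed (the sample session is illustrative):

    from pygments import highlight
    from pygments.lexers.shell import BashSessionLexer
    from pygments.formatters import TerminalFormatter

    session = '$ echo hello\nhello\n'
    # '$ echo hello' is split into prompt plus Bash tokens; 'hello' is output.
    print(highlight(session, BashSessionLexer(), TerminalFormatter()))
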
+""" + +import re + +from pygments.lexer import Lexer, RegexLexer, do_insertions, bygroups, \ + include, default, this, using, words +from pygments.token import Punctuation, \ + Text, Comment, Operator, Keyword, Name, String, Number, Generic +from pygments.util import shebang_matches + + +__all__ = ['BashLexer', 'BashSessionLexer', 'TcshLexer', 'BatchLexer', + 'SlurmBashLexer', 'MSDOSSessionLexer', 'PowerShellLexer', + 'PowerShellSessionLexer', 'TcshSessionLexer', 'FishShellLexer', + 'ExeclineLexer'] + +line_re = re.compile('.*?\n') + + +class BashLexer(RegexLexer): + """ + Lexer for (ba|k|z|)sh shell scripts. + + .. versionadded:: 0.6 + """ + + name = 'Bash' + aliases = ['bash', 'sh', 'ksh', 'zsh', 'shell'] + filenames = ['*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass', + '*.exheres-0', '*.exlib', '*.zsh', + '.bashrc', 'bashrc', '.bash_*', 'bash_*', 'zshrc', '.zshrc', + 'PKGBUILD'] + mimetypes = ['application/x-sh', 'application/x-shellscript', 'text/x-shellscript'] + + tokens = { + 'root': [ + include('basic'), + (r'`', String.Backtick, 'backticks'), + include('data'), + include('interp'), + ], + 'interp': [ + (r'\$\(\(', Keyword, 'math'), + (r'\$\(', Keyword, 'paren'), + (r'\$\{#?', String.Interpol, 'curly'), + (r'\$[a-zA-Z_]\w*', Name.Variable), # user variable + (r'\$(?:\d+|[#$?!_*@-])', Name.Variable), # builtin + (r'\$', Text), + ], + 'basic': [ + (r'\b(if|fi|else|while|do|done|for|then|return|function|case|' + r'select|continue|until|esac|elif)(\s*)\b', + bygroups(Keyword, Text)), + (r'\b(alias|bg|bind|break|builtin|caller|cd|command|compgen|' + r'complete|declare|dirs|disown|echo|enable|eval|exec|exit|' + r'export|false|fc|fg|getopts|hash|help|history|jobs|kill|let|' + r'local|logout|popd|printf|pushd|pwd|read|readonly|set|shift|' + r'shopt|source|suspend|test|time|times|trap|true|type|typeset|' + r'ulimit|umask|unalias|unset|wait)(?=[\s)`])', + Name.Builtin), + (r'\A#!.+\n', Comment.Hashbang), + (r'#.*\n', Comment.Single), + (r'\\[\w\W]', String.Escape), + (r'(\b\w+)(\s*)(\+?=)', bygroups(Name.Variable, Text, Operator)), + (r'[\[\]{}()=]', Operator), + (r'<<<', Operator), # here-string + (r'<<-?\s*(\'?)\\?(\w+)[\w\W]+?\2', String), + (r'&&|\|\|', Operator), + ], + 'data': [ + (r'(?s)\$?"(\\.|[^"\\$])*"', String.Double), + (r'"', String.Double, 'string'), + (r"(?s)\$'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single), + (r"(?s)'.*?'", String.Single), + (r';', Punctuation), + (r'&', Punctuation), + (r'\|', Punctuation), + (r'\s+', Text), + (r'\d+\b', Number), + (r'[^=\s\[\]{}()$"\'`\\<&|;]+', Text), + (r'<', Text), + ], + 'string': [ + (r'"', String.Double, '#pop'), + (r'(?s)(\\\\|\\[0-7]+|\\.|[^"\\$])+', String.Double), + include('interp'), + ], + 'curly': [ + (r'\}', String.Interpol, '#pop'), + (r':-', Keyword), + (r'\w+', Name.Variable), + (r'[^}:"\'`$\\]+', Punctuation), + (r':', Punctuation), + include('root'), + ], + 'paren': [ + (r'\)', Keyword, '#pop'), + include('root'), + ], + 'math': [ + (r'\)\)', Keyword, '#pop'), + (r'[-+*/%^|&]|\*\*|\|\|', Operator), + (r'\d+#\d+', Number), + (r'\d+#(?! )', Number), + (r'\d+', Number), + include('root'), + ], + 'backticks': [ + (r'`', String.Backtick, '#pop'), + include('root'), + ], + } + + def analyse_text(text): + if shebang_matches(text, r'(ba|z|)sh'): + return 1 + if text.startswith('$ '): + return 0.2 + + +class SlurmBashLexer(BashLexer): + """ + Lexer for (ba|k|z|)sh Slurm scripts. + + .. 
versionadded:: 2.4 + """ + + name = 'Slurm' + aliases = ['slurm', 'sbatch'] + filenames = ['*.sl'] + mimetypes = [] + EXTRA_KEYWORDS = {'srun'} + + def get_tokens_unprocessed(self, text): + for index, token, value in BashLexer.get_tokens_unprocessed(self, text): + if token is Text and value in self.EXTRA_KEYWORDS: + yield index, Name.Builtin, value + elif token is Comment.Single and 'SBATCH' in value: + yield index, Keyword.Pseudo, value + else: + yield index, token, value + +class ShellSessionBaseLexer(Lexer): + """ + Base lexer for simplistic shell sessions. + + .. versionadded:: 2.1 + """ + + _venv = re.compile(r'^(\([^)]*\))(\s*)') + + def get_tokens_unprocessed(self, text): + innerlexer = self._innerLexerCls(**self.options) + + pos = 0 + curcode = '' + insertions = [] + backslash_continuation = False + + for match in line_re.finditer(text): + line = match.group() + if backslash_continuation: + curcode += line + backslash_continuation = curcode.endswith('\\\n') + continue + + venv_match = self._venv.match(line) + if venv_match: + venv = venv_match.group(1) + venv_whitespace = venv_match.group(2) + insertions.append((len(curcode), + [(0, Generic.Prompt.VirtualEnv, venv)])) + if venv_whitespace: + insertions.append((len(curcode), + [(0, Text, venv_whitespace)])) + line = line[venv_match.end():] + + m = self._ps1rgx.match(line) + if m: + # To support output lexers (say diff output), the output + # needs to be broken by prompts whenever the output lexer + # changes. + if not insertions: + pos = match.start() + + insertions.append((len(curcode), + [(0, Generic.Prompt, m.group(1))])) + curcode += m.group(2) + backslash_continuation = curcode.endswith('\\\n') + elif line.startswith(self._ps2): + insertions.append((len(curcode), + [(0, Generic.Prompt, line[:len(self._ps2)])])) + curcode += line[len(self._ps2):] + backslash_continuation = curcode.endswith('\\\n') + else: + if insertions: + toks = innerlexer.get_tokens_unprocessed(curcode) + for i, t, v in do_insertions(insertions, toks): + yield pos+i, t, v + yield match.start(), Generic.Output, line + insertions = [] + curcode = '' + if insertions: + for i, t, v in do_insertions(insertions, + innerlexer.get_tokens_unprocessed(curcode)): + yield pos+i, t, v + + +class BashSessionLexer(ShellSessionBaseLexer): + """ + Lexer for simplistic shell sessions. + + .. versionadded:: 1.1 + """ + + name = 'Bash Session' + aliases = ['console', 'shell-session'] + filenames = ['*.sh-session', '*.shell-session'] + mimetypes = ['application/x-shell-session', 'application/x-sh-session'] + + _innerLexerCls = BashLexer + _ps1rgx = re.compile( + r'^((?:(?:\[.*?\])|(?:\(\S+\))?(?:| |sh\S*?|\w+\S+[@:]\S+(?:\s+\S+)' \ + r'?|\[\S+[@:][^\n]+\].+))\s*[$#%])(.*\n?)') + _ps2 = '>' + + +class BatchLexer(RegexLexer): + """ + Lexer for the DOS/Windows Batch file format. + + .. 
versionadded:: 0.7 + """ + name = 'Batchfile' + aliases = ['bat', 'batch', 'dosbatch', 'winbatch'] + filenames = ['*.bat', '*.cmd'] + mimetypes = ['application/x-dos-batch'] + + flags = re.MULTILINE | re.IGNORECASE + + _nl = r'\n\x1a' + _punct = r'&<>|' + _ws = r'\t\v\f\r ,;=\xa0' + _nlws = r'\s\x1a\xa0,;=' + _space = r'(?:(?:(?:\^[%s])?[%s])+)' % (_nl, _ws) + _keyword_terminator = (r'(?=(?:\^[%s]?)?[%s+./:[\\\]]|[%s%s(])' % + (_nl, _ws, _nl, _punct)) + _token_terminator = r'(?=\^?[%s]|[%s%s])' % (_ws, _punct, _nl) + _start_label = r'((?:(?<=^[^:])|^[^:]?)[%s]*)(:)' % _ws + _label = r'(?:(?:[^%s%s+:^]|\^[%s]?[\w\W])*)' % (_nlws, _punct, _nl) + _label_compound = r'(?:(?:[^%s%s+:^)]|\^[%s]?[^)])*)' % (_nlws, _punct, _nl) + _number = r'(?:-?(?:0[0-7]+|0x[\da-f]+|\d+)%s)' % _token_terminator + _opword = r'(?:equ|geq|gtr|leq|lss|neq)' + _string = r'(?:"[^%s"]*(?:"|(?=[%s])))' % (_nl, _nl) + _variable = (r'(?:(?:%%(?:\*|(?:~[a-z]*(?:\$[^:]+:)?)?\d|' + r'[^%%:%s]+(?::(?:~(?:-?\d+)?(?:,(?:-?\d+)?)?|(?:[^%%%s^]|' + r'\^[^%%%s])[^=%s]*=(?:[^%%%s^]|\^[^%%%s])*)?)?%%))|' + r'(?:\^?![^!:%s]+(?::(?:~(?:-?\d+)?(?:,(?:-?\d+)?)?|(?:' + r'[^!%s^]|\^[^!%s])[^=%s]*=(?:[^!%s^]|\^[^!%s])*)?)?\^?!))' % + (_nl, _nl, _nl, _nl, _nl, _nl, _nl, _nl, _nl, _nl, _nl, _nl)) + _core_token = r'(?:(?:(?:\^[%s]?)?[^"%s%s])+)' % (_nl, _nlws, _punct) + _core_token_compound = r'(?:(?:(?:\^[%s]?)?[^"%s%s)])+)' % (_nl, _nlws, _punct) + _token = r'(?:[%s]+|%s)' % (_punct, _core_token) + _token_compound = r'(?:[%s]+|%s)' % (_punct, _core_token_compound) + _stoken = (r'(?:[%s]+|(?:%s|%s|%s)+)' % + (_punct, _string, _variable, _core_token)) + + def _make_begin_state(compound, _core_token=_core_token, + _core_token_compound=_core_token_compound, + _keyword_terminator=_keyword_terminator, + _nl=_nl, _punct=_punct, _string=_string, + _space=_space, _start_label=_start_label, + _stoken=_stoken, _token_terminator=_token_terminator, + _variable=_variable, _ws=_ws): + rest = '(?:%s|%s|[^"%%%s%s%s])*' % (_string, _variable, _nl, _punct, + ')' if compound else '') + rest_of_line = r'(?:(?:[^%s^]|\^[%s]?[\w\W])*)' % (_nl, _nl) + rest_of_line_compound = r'(?:(?:[^%s^)]|\^[%s]?[^)])*)' % (_nl, _nl) + set_space = r'((?:(?:\^[%s]?)?[^\S\n])*)' % _nl + suffix = '' + if compound: + _keyword_terminator = r'(?:(?=\))|%s)' % _keyword_terminator + _token_terminator = r'(?:(?=\))|%s)' % _token_terminator + suffix = '/compound' + return [ + ((r'\)', Punctuation, '#pop') if compound else + (r'\)((?=\()|%s)%s' % (_token_terminator, rest_of_line), + Comment.Single)), + (r'(?=%s)' % _start_label, Text, 'follow%s' % suffix), + (_space, using(this, state='text')), + include('redirect%s' % suffix), + (r'[%s]+' % _nl, Text), + (r'\(', Punctuation, 'root/compound'), + (r'@+', Punctuation), + (r'((?:for|if|rem)(?:(?=(?:\^[%s]?)?/)|(?:(?!\^)|' + r'(?<=m))(?:(?=\()|%s)))(%s?%s?(?:\^[%s]?)?/(?:\^[%s]?)?\?)' % + (_nl, _token_terminator, _space, + _core_token_compound if compound else _core_token, _nl, _nl), + bygroups(Keyword, using(this, state='text')), + 'follow%s' % suffix), + (r'(goto%s)(%s(?:\^[%s]?)?/(?:\^[%s]?)?\?%s)' % + (_keyword_terminator, rest, _nl, _nl, rest), + bygroups(Keyword, using(this, state='text')), + 'follow%s' % suffix), + (words(('assoc', 'break', 'cd', 'chdir', 'cls', 'color', 'copy', + 'date', 'del', 'dir', 'dpath', 'echo', 'endlocal', 'erase', + 'exit', 'ftype', 'keys', 'md', 'mkdir', 'mklink', 'move', + 'path', 'pause', 'popd', 'prompt', 'pushd', 'rd', 'ren', + 'rename', 'rmdir', 'setlocal', 'shift', 'start', 'time', + 'title', 'type', 
'ver', 'verify', 'vol'), + suffix=_keyword_terminator), Keyword, 'follow%s' % suffix), + (r'(call)(%s?)(:)' % _space, + bygroups(Keyword, using(this, state='text'), Punctuation), + 'call%s' % suffix), + (r'call%s' % _keyword_terminator, Keyword), + (r'(for%s(?!\^))(%s)(/f%s)' % + (_token_terminator, _space, _token_terminator), + bygroups(Keyword, using(this, state='text'), Keyword), + ('for/f', 'for')), + (r'(for%s(?!\^))(%s)(/l%s)' % + (_token_terminator, _space, _token_terminator), + bygroups(Keyword, using(this, state='text'), Keyword), + ('for/l', 'for')), + (r'for%s(?!\^)' % _token_terminator, Keyword, ('for2', 'for')), + (r'(goto%s)(%s?)(:?)' % (_keyword_terminator, _space), + bygroups(Keyword, using(this, state='text'), Punctuation), + 'label%s' % suffix), + (r'(if(?:(?=\()|%s)(?!\^))(%s?)((?:/i%s)?)(%s?)((?:not%s)?)(%s?)' % + (_token_terminator, _space, _token_terminator, _space, + _token_terminator, _space), + bygroups(Keyword, using(this, state='text'), Keyword, + using(this, state='text'), Keyword, + using(this, state='text')), ('(?', 'if')), + (r'rem(((?=\()|%s)%s?%s?.*|%s%s)' % + (_token_terminator, _space, _stoken, _keyword_terminator, + rest_of_line_compound if compound else rest_of_line), + Comment.Single, 'follow%s' % suffix), + (r'(set%s)%s(/a)' % (_keyword_terminator, set_space), + bygroups(Keyword, using(this, state='text'), Keyword), + 'arithmetic%s' % suffix), + (r'(set%s)%s((?:/p)?)%s((?:(?:(?:\^[%s]?)?[^"%s%s^=%s]|' + r'\^[%s]?[^"=])+)?)((?:(?:\^[%s]?)?=)?)' % + (_keyword_terminator, set_space, set_space, _nl, _nl, _punct, + ')' if compound else '', _nl, _nl), + bygroups(Keyword, using(this, state='text'), Keyword, + using(this, state='text'), using(this, state='variable'), + Punctuation), + 'follow%s' % suffix), + default('follow%s' % suffix) + ] + + def _make_follow_state(compound, _label=_label, + _label_compound=_label_compound, _nl=_nl, + _space=_space, _start_label=_start_label, + _token=_token, _token_compound=_token_compound, + _ws=_ws): + suffix = '/compound' if compound else '' + state = [] + if compound: + state.append((r'(?=\))', Text, '#pop')) + state += [ + (r'%s([%s]*)(%s)(.*)' % + (_start_label, _ws, _label_compound if compound else _label), + bygroups(Text, Punctuation, Text, Name.Label, Comment.Single)), + include('redirect%s' % suffix), + (r'(?=[%s])' % _nl, Text, '#pop'), + (r'\|\|?|&&?', Punctuation, '#pop'), + include('text') + ] + return state + + def _make_arithmetic_state(compound, _nl=_nl, _punct=_punct, + _string=_string, _variable=_variable, + _ws=_ws, _nlws=_nlws): + op = r'=+\-*/!~' + state = [] + if compound: + state.append((r'(?=\))', Text, '#pop')) + state += [ + (r'0[0-7]+', Number.Oct), + (r'0x[\da-f]+', Number.Hex), + (r'\d+', Number.Integer), + (r'[(),]+', Punctuation), + (r'([%s]|%%|\^\^)+' % op, Operator), + (r'(%s|%s|(\^[%s]?)?[^()%s%%\^"%s%s]|\^[%s]?%s)+' % + (_string, _variable, _nl, op, _nlws, _punct, _nlws, + r'[^)]' if compound else r'[\w\W]'), + using(this, state='variable')), + (r'(?=[\x00|&])', Text, '#pop'), + include('follow') + ] + return state + + def _make_call_state(compound, _label=_label, + _label_compound=_label_compound): + state = [] + if compound: + state.append((r'(?=\))', Text, '#pop')) + state.append((r'(:?)(%s)' % (_label_compound if compound else _label), + bygroups(Punctuation, Name.Label), '#pop')) + return state + + def _make_label_state(compound, _label=_label, + _label_compound=_label_compound, _nl=_nl, + _punct=_punct, _string=_string, _variable=_variable): + state = [] + if compound: + 
state.append((r'(?=\))', Text, '#pop'))
+ state.append((r'(%s?)((?:%s|%s|\^[%s]?%s|[^"%%^%s%s%s])*)' %
+ (_label_compound if compound else _label, _string,
+ _variable, _nl, r'[^)]' if compound else r'[\w\W]', _nl,
+ _punct, r')' if compound else ''),
+ bygroups(Name.Label, Comment.Single), '#pop'))
+ return state
+
+ def _make_redirect_state(compound,
+ _core_token_compound=_core_token_compound,
+ _nl=_nl, _punct=_punct, _stoken=_stoken,
+ _string=_string, _space=_space,
+ _variable=_variable, _nlws=_nlws):
+ stoken_compound = (r'(?:[%s]+|(?:%s|%s|%s)+)' %
+ (_punct, _string, _variable, _core_token_compound))
+ return [
+ (r'((?:(?<=[%s])\d)?)(>>?&|<&)([%s]*)(\d)' %
+ (_nlws, _nlws),
+ bygroups(Number.Integer, Punctuation, Text, Number.Integer)),
+ (r'((?:(?<=[%s])(?<!\^[%s])\d)?)(>>?|<)(%s?%s)' %
+ (_nlws, _nl, _space, stoken_compound if compound else _stoken),
+ bygroups(Number.Integer, Punctuation, using(this, state='text')))
+ ]
+
+ tokens = {
+ 'root': _make_begin_state(False),
+ 'follow': _make_follow_state(False),
+ 'arithmetic': _make_arithmetic_state(False),
+ 'call': _make_call_state(False),
+ 'label': _make_label_state(False),
+ 'redirect': _make_redirect_state(False),
+ 'root/compound': _make_begin_state(True),
+ 'follow/compound': _make_follow_state(True),
+ 'arithmetic/compound': _make_arithmetic_state(True),
+ 'call/compound': _make_call_state(True),
+ 'label/compound': _make_label_state(True),
+ 'redirect/compound': _make_redirect_state(True),
+ 'variable-or-escape': [
+ (_variable, Name.Variable),
+ (r'%%%%|\^[%s]?(\^!|[\w\W])' % _nl, String.Escape)
+ ],
+ 'string': [
+ (r'"', String.Double, '#pop'),
+ (_variable, Name.Variable),
+ (r'\^!|%%', String.Escape),
+ (r'[^"%%^%s]+|[%%^]' % _nl, String.Double),
+ default('#pop')
+ ],
+ 'sqstring': [
+ include('variable-or-escape'),
+ (r'[^%]+|%', String.Single)
+ ],
+ 'bqstring': [
+ include('variable-or-escape'),
+ (r'[^%]+|%', String.Backtick)
+ ],
+ 'text': [
+ (r'"', String.Double, 'string'),
+ include('variable-or-escape'),
+ (r'[^"%%^%s%s\d)]+|.' % (_nlws, _punct), Text)
+ ],
+ 'variable': [
+ (r'"', String.Double, 'string'),
+ include('variable-or-escape'),
+ (r'[^"%%^%s]+|.' 
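# Note on the rule above: any run of characters other than '"', '%', '^' or
# a newline is lexed as variable-name text; the |. fallback then consumes a
# single leftover character at a time.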
% _nl, Name.Variable) + ], + 'for': [ + (r'(%s)(in)(%s)(\()' % (_space, _space), + bygroups(using(this, state='text'), Keyword, + using(this, state='text'), Punctuation), '#pop'), + include('follow') + ], + 'for2': [ + (r'\)', Punctuation), + (r'(%s)(do%s)' % (_space, _token_terminator), + bygroups(using(this, state='text'), Keyword), '#pop'), + (r'[%s]+' % _nl, Text), + include('follow') + ], + 'for/f': [ + (r'(")((?:%s|[^"])*?")([%s]*)(\))' % (_variable, _nlws), + bygroups(String.Double, using(this, state='string'), Text, + Punctuation)), + (r'"', String.Double, ('#pop', 'for2', 'string')), + (r"('(?:%%%%|%s|[\w\W])*?')([%s]*)(\))" % (_variable, _nlws), + bygroups(using(this, state='sqstring'), Text, Punctuation)), + (r'(`(?:%%%%|%s|[\w\W])*?`)([%s]*)(\))' % (_variable, _nlws), + bygroups(using(this, state='bqstring'), Text, Punctuation)), + include('for2') + ], + 'for/l': [ + (r'-?\d+', Number.Integer), + include('for2') + ], + 'if': [ + (r'((?:cmdextversion|errorlevel)%s)(%s)(\d+)' % + (_token_terminator, _space), + bygroups(Keyword, using(this, state='text'), + Number.Integer), '#pop'), + (r'(defined%s)(%s)(%s)' % (_token_terminator, _space, _stoken), + bygroups(Keyword, using(this, state='text'), + using(this, state='variable')), '#pop'), + (r'(exist%s)(%s%s)' % (_token_terminator, _space, _stoken), + bygroups(Keyword, using(this, state='text')), '#pop'), + (r'(%s%s)(%s)(%s%s)' % (_number, _space, _opword, _space, _number), + bygroups(using(this, state='arithmetic'), Operator.Word, + using(this, state='arithmetic')), '#pop'), + (_stoken, using(this, state='text'), ('#pop', 'if2')), + ], + 'if2': [ + (r'(%s?)(==)(%s?%s)' % (_space, _space, _stoken), + bygroups(using(this, state='text'), Operator, + using(this, state='text')), '#pop'), + (r'(%s)(%s)(%s%s)' % (_space, _opword, _space, _stoken), + bygroups(using(this, state='text'), Operator.Word, + using(this, state='text')), '#pop') + ], + '(?': [ + (_space, using(this, state='text')), + (r'\(', Punctuation, ('#pop', 'else?', 'root/compound')), + default('#pop') + ], + 'else?': [ + (_space, using(this, state='text')), + (r'else%s' % _token_terminator, Keyword, '#pop'), + default('#pop') + ] + } + + +class MSDOSSessionLexer(ShellSessionBaseLexer): + """ + Lexer for simplistic MSDOS sessions. + + .. versionadded:: 2.1 + """ + + name = 'MSDOS Session' + aliases = ['doscon'] + filenames = [] + mimetypes = [] + + _innerLexerCls = BatchLexer + _ps1rgx = re.compile(r'^([^>]*>)(.*\n?)') + _ps2 = 'More? ' + + +class TcshLexer(RegexLexer): + """ + Lexer for tcsh scripts. + + .. 
versionadded:: 0.10 + """ + + name = 'Tcsh' + aliases = ['tcsh', 'csh'] + filenames = ['*.tcsh', '*.csh'] + mimetypes = ['application/x-csh'] + + tokens = { + 'root': [ + include('basic'), + (r'\$\(', Keyword, 'paren'), + (r'\$\{#?', Keyword, 'curly'), + (r'`', String.Backtick, 'backticks'), + include('data'), + ], + 'basic': [ + (r'\b(if|endif|else|while|then|foreach|case|default|' + r'continue|goto|breaksw|end|switch|endsw)\s*\b', + Keyword), + (r'\b(alias|alloc|bg|bindkey|break|builtins|bye|caller|cd|chdir|' + r'complete|dirs|echo|echotc|eval|exec|exit|fg|filetest|getxvers|' + r'glob|getspath|hashstat|history|hup|inlib|jobs|kill|' + r'limit|log|login|logout|ls-F|migrate|newgrp|nice|nohup|notify|' + r'onintr|popd|printenv|pushd|rehash|repeat|rootnode|popd|pushd|' + r'set|shift|sched|setenv|setpath|settc|setty|setxvers|shift|' + r'source|stop|suspend|source|suspend|telltc|time|' + r'umask|unalias|uncomplete|unhash|universe|unlimit|unset|unsetenv|' + r'ver|wait|warp|watchlog|where|which)\s*\b', + Name.Builtin), + (r'#.*', Comment), + (r'\\[\w\W]', String.Escape), + (r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Text, Operator)), + (r'[\[\]{}()=]+', Operator), + (r'<<\s*(\'?)\\?(\w+)[\w\W]+?\2', String), + (r';', Punctuation), + ], + 'data': [ + (r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double), + (r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single), + (r'\s+', Text), + (r'[^=\s\[\]{}()$"\'`\\;#]+', Text), + (r'\d+(?= |\Z)', Number), + (r'\$#?(\w+|.)', Name.Variable), + ], + 'curly': [ + (r'\}', Keyword, '#pop'), + (r':-', Keyword), + (r'\w+', Name.Variable), + (r'[^}:"\'`$]+', Punctuation), + (r':', Punctuation), + include('root'), + ], + 'paren': [ + (r'\)', Keyword, '#pop'), + include('root'), + ], + 'backticks': [ + (r'`', String.Backtick, '#pop'), + include('root'), + ], + } + + +class TcshSessionLexer(ShellSessionBaseLexer): + """ + Lexer for Tcsh sessions. + + .. versionadded:: 2.1 + """ + + name = 'Tcsh Session' + aliases = ['tcshcon'] + filenames = [] + mimetypes = [] + + _innerLexerCls = TcshLexer + _ps1rgx = re.compile(r'^([^>]+>)(.*\n?)') + _ps2 = '? ' + + +class PowerShellLexer(RegexLexer): + """ + For Windows PowerShell code. + + .. versionadded:: 1.5 + """ + name = 'PowerShell' + aliases = ['powershell', 'posh', 'ps1', 'psm1'] + filenames = ['*.ps1', '*.psm1'] + mimetypes = ['text/x-powershell'] + + flags = re.DOTALL | re.IGNORECASE | re.MULTILINE + + keywords = ( + 'while validateset validaterange validatepattern validatelength ' + 'validatecount until trap switch return ref process param parameter in ' + 'if global: function foreach for finally filter end elseif else ' + 'dynamicparam do default continue cmdletbinding break begin alias \\? 
'
+ '% #script #private #local #global mandatory parametersetname position '
+ 'valuefrompipeline valuefrompipelinebypropertyname '
+ 'valuefromremainingarguments helpmessage try catch throw').split()
+
+ operators = (
+ 'and as band bnot bor bxor casesensitive ccontains ceq cge cgt cle '
+ 'clike clt cmatch cne cnotcontains cnotlike cnotmatch contains '
+ 'creplace eq exact f file ge gt icontains ieq ige igt ile ilike ilt '
+ 'imatch ine inotcontains inotlike inotmatch ireplace is isnot le like '
+ 'lt match ne not notcontains notlike notmatch or regex replace '
+ 'wildcard').split()
+
+ verbs = (
+ 'write where watch wait use update unregister unpublish unprotect '
+ 'unlock uninstall undo unblock trace test tee take sync switch '
+ 'suspend submit stop step start split sort skip show set send select '
+ 'search scroll save revoke resume restore restart resolve resize '
+ 'reset request repair rename remove register redo receive read push '
+ 'publish protect pop ping out optimize open new move mount merge '
+ 'measure lock limit join invoke install initialize import hide group '
+ 'grant get format foreach find export expand exit enter enable edit '
+ 'dismount disconnect disable deny debug cxnew copy convertto '
+ 'convertfrom convert connect confirm compress complete compare close '
+ 'clear checkpoint block backup assert approve aggregate add').split()
+
+ aliases_ = (
+ 'ac asnp cat cd cfs chdir clc clear clhy cli clp cls clv cnsn '
+ 'compare copy cp cpi cpp curl cvpa dbp del diff dir dnsn ebp echo epal '
+ 'epcsv epsn erase etsn exsn fc fhx fl foreach ft fw gal gbp gc gci gcm '
+ 'gcs gdr ghy gi gjb gl gm gmo gp gps gpv group gsn gsnp gsv gu gv gwmi '
+ 'h history icm iex ihy ii ipal ipcsv ipmo ipsn irm ise iwmi iwr kill lp '
+ 'ls man md measure mi mount move mp mv nal ndr ni nmo npssc nsn nv ogv '
+ 'oh popd ps pushd pwd r rbp rcjb rcsn rd rdr ren ri rjb rm rmdir rmo '
+ 'rni rnp rp rsn rsnp rujb rv rvpa rwmi sajb sal saps sasv sbp sc select '
+ 'set shcm si sl sleep sls sort sp spjb spps spsv start sujb sv swmi tee '
+ 'trcm type wget where wjb write').split()
+
+ commenthelp = (
+ 'component description example externalhelp forwardhelpcategory '
+ 'forwardhelptargetname functionality inputs link '
+ 'notes outputs parameter remotehelprunspace role synopsis').split()
+
+ tokens = {
+ 'root': [
+ # we need to count pairs of parentheses for correct highlight
+ # of '$(...)' blocks in strings
+ (r'\(', Punctuation, 'child'),
+ (r'\s+', Text),
+ (r'^(\s*#[#\s]*)(\.(?:%s))([^\n]*$)' % '|'.join(commenthelp),
+ bygroups(Comment, String.Doc, Comment)),
+ (r'#[^\n]*?$', Comment),
+ (r'(&lt;|<)#', Comment.Multiline, 'multline'),
+ (r'@"\n', String.Heredoc, 'heredoc-double'),
+ (r"@'\n.*?\n'@", String.Heredoc),
+ # escaped syntax
+ (r'`[\'"$@-]', Punctuation),
+ (r'"', String.Double, 'string'),
+ (r"'([^']|'')*'", String.Single),
+ (r'(\$|@@|@)((global|script|private|env):)?\w+',
+ Name.Variable),
+ (r'(%s)\b' % '|'.join(keywords), Keyword),
+ (r'-(%s)\b' % '|'.join(operators), Operator),
+ (r'(%s)-[a-z_]\w*\b' % '|'.join(verbs), Name.Builtin),
+ (r'(%s)\s' % '|'.join(aliases_), Name.Builtin),
+ (r'\[[a-z_\[][\w. `,\[\]]*\]', Name.Constant), # .net [type]s
+ (r'-[a-z_]\w*', Name),
+ (r'\w+', Name),
+ (r'[.,;@{}\[\]$()=+*/\\&%!~?^`|<>-]|::', Punctuation),
+ ],
+ 'child': [
+ (r'\)', Punctuation, '#pop'),
+ include('root'),
+ ],
+ 'multline': [
+ (r'[^#&.]+', Comment.Multiline),
+ (r'#(&gt;|>)', Comment.Multiline, '#pop'),
+ (r'\.(%s)' % '|'.join(commenthelp), String.Doc),
+ (r'[#&.]', Comment.Multiline),
+ ],
+ 'string': [
+ (r"`[0abfnrtv'\"$`]", String.Escape),
+ (r'[^$`"]+', String.Double),
+ (r'\$\(', Punctuation, 'child'),
+ (r'""', String.Double),
+ (r'[`$]', String.Double),
+ (r'"', String.Double, '#pop'),
+ ],
+ 'heredoc-double': [
+ (r'\n"@', String.Heredoc, '#pop'),
+ (r'\$\(', Punctuation, 'child'),
+ (r'[^@\n]+"]', String.Heredoc),
+ (r".", String.Heredoc),
+ ]
+ }
+
+
+class PowerShellSessionLexer(ShellSessionBaseLexer):
+ """
+ Lexer for simplistic Windows PowerShell sessions.
+
+ .. versionadded:: 2.1
+ """
+
+ name = 'PowerShell Session'
+ aliases = ['ps1con']
+ filenames = []
+ mimetypes = []
+
+ _innerLexerCls = PowerShellLexer
+ _ps1rgx = re.compile(r'^((?:\[[^]]+\]: )?PS[^>]*> ?)(.*\n?)')
+ _ps2 = '>> '
+
+
+class FishShellLexer(RegexLexer):
+ """
+ Lexer for Fish shell scripts.
+
+ .. versionadded:: 2.1
+ """
+
+ name = 'Fish'
+ aliases = ['fish', 'fishshell']
+ filenames = ['*.fish', '*.load']
+ mimetypes = ['application/x-fish']
+
+ tokens = {
+ 'root': [
+ include('basic'),
+ include('data'),
+ include('interp'),
+ ],
+ 'interp': [
+ (r'\$\(\(', Keyword, 'math'),
+ (r'\(', Keyword, 'paren'),
+ (r'\$#?(\w+|.)', Name.Variable),
+ ],
+ 'basic': [
+ (r'\b(begin|end|if|else|while|break|for|in|return|function|block|'
+ r'case|continue|switch|not|and|or|set|echo|exit|pwd|true|false|'
+ r'cd|count|test)(\s*)\b',
+ bygroups(Keyword, Text)),
+ (r'\b(alias|bg|bind|breakpoint|builtin|command|commandline|'
+ r'complete|contains|dirh|dirs|emit|eval|exec|fg|fish|fish_config|'
+ r'fish_indent|fish_pager|fish_prompt|fish_right_prompt|'
+ r'fish_update_completions|fishd|funced|funcsave|functions|help|'
+ r'history|isatty|jobs|math|mimedb|nextd|open|popd|prevd|psub|'
+ r'pushd|random|read|set_color|source|status|trap|type|ulimit|'
+ r'umask|vared|fc|getopts|hash|kill|printf|time|wait)\s*\b(?!\.)',
+ Name.Builtin),
+ (r'#.*\n', Comment),
+ (r'\\[\w\W]', String.Escape),
+ (r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Text, Operator)),
+ (r'[\[\]()=]', Operator),
+ (r'<<-?\s*(\'?)\\?(\w+)[\w\W]+?\2', String),
+ ],
+ 'data': [
+ (r'(?s)\$?"(\\\\|\\[0-7]+|\\.|[^"\\$])*"', String.Double),
+ (r'"', String.Double, 'string'),
+ (r"(?s)\$'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
+ (r"(?s)'.*?'", String.Single),
+ (r';', Punctuation),
+ (r'&|\||\^|<|>', Operator),
+ (r'\s+', Text),
+ (r'\d+(?= |\Z)', Number),
+ (r'[^=\s\[\]{}()$"\'`\\<&|;]+', Text),
+ ],
+ 'string': [
+ (r'"', String.Double, '#pop'),
+ (r'(?s)(\\\\|\\[0-7]+|\\.|[^"\\$])+', String.Double),
+ include('interp'),
+ ],
+ 'paren': [
+ (r'\)', Keyword, '#pop'),
+ include('root'),
+ ],
+ 'math': [
+ (r'\)\)', Keyword, '#pop'),
+ (r'[-+*/%^|&]|\*\*|\|\|', Operator),
+ (r'\d+#\d+', Number),
+ (r'\d+#(?! )', Number),
+ (r'\d+', Number),
+ include('root'),
+ ],
+ }
+
+class ExeclineLexer(RegexLexer):
+ """
+ Lexer for Laurent Bercot's execline language
+ (https://skarnet.org/software/execline).
+
+ ..
versionadded:: 2.7 + """ + + name = 'execline' + aliases = ['execline'] + filenames = ['*.exec'] + + tokens = { + 'root': [ + include('basic'), + include('data'), + include('interp') + ], + 'interp': [ + (r'\$\{', String.Interpol, 'curly'), + (r'\$[\w@#]+', Name.Variable), # user variable + (r'\$', Text), + ], + 'basic': [ + (r'\b(background|backtick|cd|define|dollarat|elgetopt|' + r'elgetpositionals|elglob|emptyenv|envfile|exec|execlineb|' + r'exit|export|fdblock|fdclose|fdmove|fdreserve|fdswap|' + r'forbacktickx|foreground|forstdin|forx|getcwd|getpid|heredoc|' + r'homeof|if|ifelse|ifte|ifthenelse|importas|loopwhilex|' + r'multidefine|multisubstitute|pipeline|piperw|posix-cd|' + r'redirfd|runblock|shift|trap|tryexec|umask|unexport|wait|' + r'withstdinas)\b', Name.Builtin), + (r'\A#!.+\n', Comment.Hashbang), + (r'#.*\n', Comment.Single), + (r'[{}]', Operator) + ], + 'data': [ + (r'(?s)"(\\.|[^"\\$])*"', String.Double), + (r'"', String.Double, 'string'), + (r'\s+', Text), + (r'[^\s{}$"\\]+', Text) + ], + 'string': [ + (r'"', String.Double, '#pop'), + (r'(?s)(\\\\|\\.|[^"\\$])+', String.Double), + include('interp'), + ], + 'curly': [ + (r'\}', String.Interpol, '#pop'), + (r'[\w#@]+', Name.Variable), + include('root') + ] + + } + + def analyse_text(text): + if shebang_matches(text, r'execlineb'): + return 1 diff --git a/pygments/lexers/sieve.py b/pygments/lexers/sieve.py old mode 100644 new mode 100755 index 814cb10..ae6bada --- a/pygments/lexers/sieve.py +++ b/pygments/lexers/sieve.py @@ -1,69 +1,69 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.sieve - ~~~~~~~~~~~~~~~~~~~~~ - - Lexer for Sieve file format. - - https://tools.ietf.org/html/rfc5228 - https://tools.ietf.org/html/rfc5173 - https://tools.ietf.org/html/rfc5229 - https://tools.ietf.org/html/rfc5230 - https://tools.ietf.org/html/rfc5232 - https://tools.ietf.org/html/rfc5235 - https://tools.ietf.org/html/rfc5429 - https://tools.ietf.org/html/rfc8580 - - :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -from pygments.lexer import RegexLexer, bygroups -from pygments.token import Comment, Name, Literal, String, Text, Punctuation, Keyword - -__all__ = ["SieveLexer"] - - -class SieveLexer(RegexLexer): - """ - Lexer for sieve format. 
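
An aside on the analyse_text hook of ExeclineLexer above: shebang_matches inspects only the #! line and compares the interpreter's basename, so detection does not depend on the install prefix. A minimal sketch, assuming Pygments is installed:

    from pygments.util import shebang_matches

    assert shebang_matches('#!/usr/bin/execlineb -P\n', r'execlineb')
    assert not shebang_matches('#!/bin/sh\n', r'execlineb')
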
- """ - name = 'Sieve' - filenames = ['*.siv', '*.sieve'] - aliases = ['sieve'] - - tokens = { - 'root': [ - (r'\s+', Text), - (r'[();,{}\[\]]', Punctuation), - # import: - (r'(?i)require', - Keyword.Namespace), - # tags: - (r'(?i)(:)(addresses|all|contains|content|create|copy|comparator|count|days|detail|domain|fcc|flags|from|handle|importance|is|localpart|length|lowerfirst|lower|matches|message|mime|options|over|percent|quotewildcard|raw|regex|specialuse|subject|text|under|upperfirst|upper|value)', - bygroups(Name.Tag, Name.Tag)), - # tokens: - (r'(?i)(address|addflag|allof|anyof|body|discard|elsif|else|envelope|ereject|exists|false|fileinto|if|hasflag|header|keep|notify_method_capability|notify|not|redirect|reject|removeflag|setflag|size|spamtest|stop|string|true|vacation|virustest)', - Name.Builtin), - (r'(?i)set', - Keyword.Declaration), - # number: - (r'([0-9.]+)([kmgKMG])?', - bygroups(Literal.Number, Literal.Number)), - # comment: - (r'#.*$', - Comment.Single), - (r'/\*.*\*/', - Comment.Multiline), - # string: - (r'"[^"]*?"', - String), - # text block: - (r'text:', - Name.Tag, 'text'), - ], - 'text': [ - (r'[^.].*?\n', String), - (r'^\.', Punctuation, "#pop"), - ] - } +# -*- coding: utf-8 -*- +""" + pygments.lexers.sieve + ~~~~~~~~~~~~~~~~~~~~~ + + Lexer for Sieve file format. + + https://tools.ietf.org/html/rfc5228 + https://tools.ietf.org/html/rfc5173 + https://tools.ietf.org/html/rfc5229 + https://tools.ietf.org/html/rfc5230 + https://tools.ietf.org/html/rfc5232 + https://tools.ietf.org/html/rfc5235 + https://tools.ietf.org/html/rfc5429 + https://tools.ietf.org/html/rfc8580 + + :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer, bygroups +from pygments.token import Comment, Name, Literal, String, Text, Punctuation, Keyword + +__all__ = ["SieveLexer"] + + +class SieveLexer(RegexLexer): + """ + Lexer for sieve format. + """ + name = 'Sieve' + filenames = ['*.siv', '*.sieve'] + aliases = ['sieve'] + + tokens = { + 'root': [ + (r'\s+', Text), + (r'[();,{}\[\]]', Punctuation), + # import: + (r'(?i)require', + Keyword.Namespace), + # tags: + (r'(?i)(:)(addresses|all|contains|content|create|copy|comparator|count|days|detail|domain|fcc|flags|from|handle|importance|is|localpart|length|lowerfirst|lower|matches|message|mime|options|over|percent|quotewildcard|raw|regex|specialuse|subject|text|under|upperfirst|upper|value)', + bygroups(Name.Tag, Name.Tag)), + # tokens: + (r'(?i)(address|addflag|allof|anyof|body|discard|elsif|else|envelope|ereject|exists|false|fileinto|if|hasflag|header|keep|notify_method_capability|notify|not|redirect|reject|removeflag|setflag|size|spamtest|stop|string|true|vacation|virustest)', + Name.Builtin), + (r'(?i)set', + Keyword.Declaration), + # number: + (r'([0-9.]+)([kmgKMG])?', + bygroups(Literal.Number, Literal.Number)), + # comment: + (r'#.*$', + Comment.Single), + (r'/\*.*\*/', + Comment.Multiline), + # string: + (r'"[^"]*?"', + String), + # text block: + (r'text:', + Name.Tag, 'text'), + ], + 'text': [ + (r'[^.].*?\n', String), + (r'^\.', Punctuation, "#pop"), + ] + } diff --git a/pygments/lexers/slash.py b/pygments/lexers/slash.py old mode 100644 new mode 100755 index 76e5929..d796081 --- a/pygments/lexers/slash.py +++ b/pygments/lexers/slash.py @@ -1,185 +1,185 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.slash - ~~~~~~~~~~~~~~~~~~~~~ - - Lexer for the `Slash `_ programming - language. 
- - :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -from pygments.lexer import ExtendedRegexLexer, bygroups, DelegatingLexer -from pygments.token import Name, Number, String, Comment, Punctuation, \ - Other, Keyword, Operator, Whitespace - -__all__ = ['SlashLexer'] - - -class SlashLanguageLexer(ExtendedRegexLexer): - _nkw = r'(?=[^a-zA-Z_0-9])' - - def move_state(new_state): - return ("#pop", new_state) - - def right_angle_bracket(lexer, match, ctx): - if len(ctx.stack) > 1 and ctx.stack[-2] == "string": - ctx.stack.pop() - yield match.start(), String.Interpol, u"}" - ctx.pos = match.end() - pass - - tokens = { - "root": [ - (r"<%=", Comment.Preproc, move_state("slash")), - (r"<%!!", Comment.Preproc, move_state("slash")), - (r"<%#.*?%>", Comment.Multiline), - (r"<%", Comment.Preproc, move_state("slash")), - (r".|\n", Other), - ], - "string": [ - (r"\\", String.Escape, move_state("string_e")), - (r"\"", String, move_state("slash")), - (r"#\{", String.Interpol, "slash"), - (r'.|\n', String), - ], - "string_e": [ - (r'n', String.Escape, move_state("string")), - (r't', String.Escape, move_state("string")), - (r'r', String.Escape, move_state("string")), - (r'e', String.Escape, move_state("string")), - (r'x[a-fA-F0-9]{2}', String.Escape, move_state("string")), - (r'.', String.Escape, move_state("string")), - ], - "regexp": [ - (r'}[a-z]*', String.Regex, move_state("slash")), - (r'\\(.|\n)', String.Regex), - (r'{', String.Regex, "regexp_r"), - (r'.|\n', String.Regex), - ], - "regexp_r": [ - (r'}[a-z]*', String.Regex, "#pop"), - (r'\\(.|\n)', String.Regex), - (r'{', String.Regex, "regexp_r"), - ], - "slash": [ - (r"%>", Comment.Preproc, move_state("root")), - (r"\"", String, move_state("string")), - (r"'[a-zA-Z0-9_]+", String), - (r'%r{', String.Regex, move_state("regexp")), - (r'/\*.*?\*/', Comment.Multiline), - (r"(#|//).*?\n", Comment.Single), - (r'-?[0-9]+e[+-]?[0-9]+', Number.Float), - (r'-?[0-9]+\.[0-9]+(e[+-]?[0-9]+)?', Number.Float), - (r'-?[0-9]+', Number.Integer), - (r'nil'+_nkw, Name.Builtin), - (r'true'+_nkw, Name.Builtin), - (r'false'+_nkw, Name.Builtin), - (r'self'+_nkw, Name.Builtin), - (r'(class)(\s+)([A-Z][a-zA-Z0-9_\']*)', - bygroups(Keyword, Whitespace, Name.Class)), - (r'class'+_nkw, Keyword), - (r'extends'+_nkw, Keyword), - (r'(def)(\s+)(self)(\s*)(\.)(\s*)([a-z_][a-zA-Z0-9_\']*=?|<<|>>|==|<=>|<=|<|>=|>|\+|-(self)?|~(self)?|\*|/|%|^|&&|&|\||\[\]=?)', - bygroups(Keyword, Whitespace, Name.Builtin, Whitespace, Punctuation, Whitespace, Name.Function)), - (r'(def)(\s+)([a-z_][a-zA-Z0-9_\']*=?|<<|>>|==|<=>|<=|<|>=|>|\+|-(self)?|~(self)?|\*|/|%|^|&&|&|\||\[\]=?)', - bygroups(Keyword, Whitespace, Name.Function)), - (r'def'+_nkw, Keyword), - (r'if'+_nkw, Keyword), - (r'elsif'+_nkw, Keyword), - (r'else'+_nkw, Keyword), - (r'unless'+_nkw, Keyword), - (r'for'+_nkw, Keyword), - (r'in'+_nkw, Keyword), - (r'while'+_nkw, Keyword), - (r'until'+_nkw, Keyword), - (r'and'+_nkw, Keyword), - (r'or'+_nkw, Keyword), - (r'not'+_nkw, Keyword), - (r'lambda'+_nkw, Keyword), - (r'try'+_nkw, Keyword), - (r'catch'+_nkw, Keyword), - (r'return'+_nkw, Keyword), - (r'next'+_nkw, Keyword), - (r'last'+_nkw, Keyword), - (r'throw'+_nkw, Keyword), - (r'use'+_nkw, Keyword), - (r'switch'+_nkw, Keyword), - (r'\\', Keyword), - (r'λ', Keyword), - (r'__FILE__'+_nkw, Name.Builtin.Pseudo), - (r'__LINE__'+_nkw, Name.Builtin.Pseudo), - (r'[A-Z][a-zA-Z0-9_\']*'+_nkw, Name.Constant), - (r'[a-z_][a-zA-Z0-9_\']*'+_nkw, Name), - (r'@[a-z_][a-zA-Z0-9_\']*'+_nkw, 
Name.Variable.Instance), - (r'@@[a-z_][a-zA-Z0-9_\']*'+_nkw, Name.Variable.Class), - (r'\(', Punctuation), - (r'\)', Punctuation), - (r'\[', Punctuation), - (r'\]', Punctuation), - (r'\{', Punctuation), - (r'\}', right_angle_bracket), - (r';', Punctuation), - (r',', Punctuation), - (r'<<=', Operator), - (r'>>=', Operator), - (r'<<', Operator), - (r'>>', Operator), - (r'==', Operator), - (r'!=', Operator), - (r'=>', Operator), - (r'=', Operator), - (r'<=>', Operator), - (r'<=', Operator), - (r'>=', Operator), - (r'<', Operator), - (r'>', Operator), - (r'\+\+', Operator), - (r'\+=', Operator), - (r'-=', Operator), - (r'\*\*=', Operator), - (r'\*=', Operator), - (r'\*\*', Operator), - (r'\*', Operator), - (r'/=', Operator), - (r'\+', Operator), - (r'-', Operator), - (r'/', Operator), - (r'%=', Operator), - (r'%', Operator), - (r'^=', Operator), - (r'&&=', Operator), - (r'&=', Operator), - (r'&&', Operator), - (r'&', Operator), - (r'\|\|=', Operator), - (r'\|=', Operator), - (r'\|\|', Operator), - (r'\|', Operator), - (r'!', Operator), - (r'\.\.\.', Operator), - (r'\.\.', Operator), - (r'\.', Operator), - (r'::', Operator), - (r':', Operator), - (r'(\s|\n)+', Whitespace), - (r'[a-z_][a-zA-Z0-9_\']*', Name.Variable), - ], - } - - -class SlashLexer(DelegatingLexer): - """ - Lexer for the Slash programming language. - - .. versionadded:: 2.4 - """ - - name = 'Slash' - aliases = ['slash'] - filenames = ['*.sl'] - - def __init__(self, **options): - from pygments.lexers.web import HtmlLexer - super(SlashLexer, self).__init__(HtmlLexer, SlashLanguageLexer, **options) +# -*- coding: utf-8 -*- +""" + pygments.lexers.slash + ~~~~~~~~~~~~~~~~~~~~~ + + Lexer for the `Slash `_ programming + language. + + :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
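Beyond the year bump, the re-imported slash.py below carries two Python 3 cleanups: the u"}" literal becomes a plain '}', and super(SlashLexer, self).__init__(...) becomes super().__init__(...). Because SlashLexer is a DelegatingLexer that hands everything outside <% ... %> to HtmlLexer, it is driven like any other lexer; a sketch with an illustrative template string:

    from pygments import highlight
    from pygments.lexers import SlashLexer
    from pygments.formatters import HtmlFormatter

    # illustrative Slash template: HTML outside <% %>, Slash code inside
    src = '<p><%= greeting %></p>'
    print(highlight(src, SlashLexer(), HtmlFormatter()))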
+""" + +from pygments.lexer import ExtendedRegexLexer, bygroups, DelegatingLexer +from pygments.token import Name, Number, String, Comment, Punctuation, \ + Other, Keyword, Operator, Whitespace + +__all__ = ['SlashLexer'] + + +class SlashLanguageLexer(ExtendedRegexLexer): + _nkw = r'(?=[^a-zA-Z_0-9])' + + def move_state(new_state): + return ("#pop", new_state) + + def right_angle_bracket(lexer, match, ctx): + if len(ctx.stack) > 1 and ctx.stack[-2] == "string": + ctx.stack.pop() + yield match.start(), String.Interpol, '}' + ctx.pos = match.end() + pass + + tokens = { + "root": [ + (r"<%=", Comment.Preproc, move_state("slash")), + (r"<%!!", Comment.Preproc, move_state("slash")), + (r"<%#.*?%>", Comment.Multiline), + (r"<%", Comment.Preproc, move_state("slash")), + (r".|\n", Other), + ], + "string": [ + (r"\\", String.Escape, move_state("string_e")), + (r"\"", String, move_state("slash")), + (r"#\{", String.Interpol, "slash"), + (r'.|\n', String), + ], + "string_e": [ + (r'n', String.Escape, move_state("string")), + (r't', String.Escape, move_state("string")), + (r'r', String.Escape, move_state("string")), + (r'e', String.Escape, move_state("string")), + (r'x[a-fA-F0-9]{2}', String.Escape, move_state("string")), + (r'.', String.Escape, move_state("string")), + ], + "regexp": [ + (r'}[a-z]*', String.Regex, move_state("slash")), + (r'\\(.|\n)', String.Regex), + (r'{', String.Regex, "regexp_r"), + (r'.|\n', String.Regex), + ], + "regexp_r": [ + (r'}[a-z]*', String.Regex, "#pop"), + (r'\\(.|\n)', String.Regex), + (r'{', String.Regex, "regexp_r"), + ], + "slash": [ + (r"%>", Comment.Preproc, move_state("root")), + (r"\"", String, move_state("string")), + (r"'[a-zA-Z0-9_]+", String), + (r'%r{', String.Regex, move_state("regexp")), + (r'/\*.*?\*/', Comment.Multiline), + (r"(#|//).*?\n", Comment.Single), + (r'-?[0-9]+e[+-]?[0-9]+', Number.Float), + (r'-?[0-9]+\.[0-9]+(e[+-]?[0-9]+)?', Number.Float), + (r'-?[0-9]+', Number.Integer), + (r'nil'+_nkw, Name.Builtin), + (r'true'+_nkw, Name.Builtin), + (r'false'+_nkw, Name.Builtin), + (r'self'+_nkw, Name.Builtin), + (r'(class)(\s+)([A-Z][a-zA-Z0-9_\']*)', + bygroups(Keyword, Whitespace, Name.Class)), + (r'class'+_nkw, Keyword), + (r'extends'+_nkw, Keyword), + (r'(def)(\s+)(self)(\s*)(\.)(\s*)([a-z_][a-zA-Z0-9_\']*=?|<<|>>|==|<=>|<=|<|>=|>|\+|-(self)?|~(self)?|\*|/|%|^|&&|&|\||\[\]=?)', + bygroups(Keyword, Whitespace, Name.Builtin, Whitespace, Punctuation, Whitespace, Name.Function)), + (r'(def)(\s+)([a-z_][a-zA-Z0-9_\']*=?|<<|>>|==|<=>|<=|<|>=|>|\+|-(self)?|~(self)?|\*|/|%|^|&&|&|\||\[\]=?)', + bygroups(Keyword, Whitespace, Name.Function)), + (r'def'+_nkw, Keyword), + (r'if'+_nkw, Keyword), + (r'elsif'+_nkw, Keyword), + (r'else'+_nkw, Keyword), + (r'unless'+_nkw, Keyword), + (r'for'+_nkw, Keyword), + (r'in'+_nkw, Keyword), + (r'while'+_nkw, Keyword), + (r'until'+_nkw, Keyword), + (r'and'+_nkw, Keyword), + (r'or'+_nkw, Keyword), + (r'not'+_nkw, Keyword), + (r'lambda'+_nkw, Keyword), + (r'try'+_nkw, Keyword), + (r'catch'+_nkw, Keyword), + (r'return'+_nkw, Keyword), + (r'next'+_nkw, Keyword), + (r'last'+_nkw, Keyword), + (r'throw'+_nkw, Keyword), + (r'use'+_nkw, Keyword), + (r'switch'+_nkw, Keyword), + (r'\\', Keyword), + (r'λ', Keyword), + (r'__FILE__'+_nkw, Name.Builtin.Pseudo), + (r'__LINE__'+_nkw, Name.Builtin.Pseudo), + (r'[A-Z][a-zA-Z0-9_\']*'+_nkw, Name.Constant), + (r'[a-z_][a-zA-Z0-9_\']*'+_nkw, Name), + (r'@[a-z_][a-zA-Z0-9_\']*'+_nkw, Name.Variable.Instance), + (r'@@[a-z_][a-zA-Z0-9_\']*'+_nkw, Name.Variable.Class), + (r'\(', Punctuation), + 
(r'\)', Punctuation), + (r'\[', Punctuation), + (r'\]', Punctuation), + (r'\{', Punctuation), + (r'\}', right_angle_bracket), + (r';', Punctuation), + (r',', Punctuation), + (r'<<=', Operator), + (r'>>=', Operator), + (r'<<', Operator), + (r'>>', Operator), + (r'==', Operator), + (r'!=', Operator), + (r'=>', Operator), + (r'=', Operator), + (r'<=>', Operator), + (r'<=', Operator), + (r'>=', Operator), + (r'<', Operator), + (r'>', Operator), + (r'\+\+', Operator), + (r'\+=', Operator), + (r'-=', Operator), + (r'\*\*=', Operator), + (r'\*=', Operator), + (r'\*\*', Operator), + (r'\*', Operator), + (r'/=', Operator), + (r'\+', Operator), + (r'-', Operator), + (r'/', Operator), + (r'%=', Operator), + (r'%', Operator), + (r'^=', Operator), + (r'&&=', Operator), + (r'&=', Operator), + (r'&&', Operator), + (r'&', Operator), + (r'\|\|=', Operator), + (r'\|=', Operator), + (r'\|\|', Operator), + (r'\|', Operator), + (r'!', Operator), + (r'\.\.\.', Operator), + (r'\.\.', Operator), + (r'\.', Operator), + (r'::', Operator), + (r':', Operator), + (r'(\s|\n)+', Whitespace), + (r'[a-z_][a-zA-Z0-9_\']*', Name.Variable), + ], + } + + +class SlashLexer(DelegatingLexer): + """ + Lexer for the Slash programming language. + + .. versionadded:: 2.4 + """ + + name = 'Slash' + aliases = ['slash'] + filenames = ['*.sl'] + + def __init__(self, **options): + from pygments.lexers.web import HtmlLexer + super().__init__(HtmlLexer, SlashLanguageLexer, **options) diff --git a/pygments/lexers/smalltalk.py b/pygments/lexers/smalltalk.py old mode 100644 new mode 100755 index 0e4584f..cdb0183 --- a/pygments/lexers/smalltalk.py +++ b/pygments/lexers/smalltalk.py @@ -1,195 +1,195 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.smalltalk - ~~~~~~~~~~~~~~~~~~~~~~~~~ - - Lexers for Smalltalk and related languages. - - :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -from pygments.lexer import RegexLexer, include, bygroups, default -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ - Number, Punctuation - -__all__ = ['SmalltalkLexer', 'NewspeakLexer'] - - -class SmalltalkLexer(RegexLexer): - """ - For `Smalltalk `_ syntax. - Contributed by Stefan Matthias Aust. - Rewritten by Nils Winter. - - .. 
versionadded:: 0.10 - """ - name = 'Smalltalk' - filenames = ['*.st'] - aliases = ['smalltalk', 'squeak', 'st'] - mimetypes = ['text/x-smalltalk'] - - tokens = { - 'root': [ - (r'(<)(\w+:)(.*?)(>)', bygroups(Text, Keyword, Text, Text)), - include('squeak fileout'), - include('whitespaces'), - include('method definition'), - (r'(\|)([\w\s]*)(\|)', bygroups(Operator, Name.Variable, Operator)), - include('objects'), - (r'\^|\:=|\_', Operator), - # temporaries - (r'[\]({}.;!]', Text), - ], - 'method definition': [ - # Not perfect can't allow whitespaces at the beginning and the - # without breaking everything - (r'([a-zA-Z]+\w*:)(\s*)(\w+)', - bygroups(Name.Function, Text, Name.Variable)), - (r'^(\b[a-zA-Z]+\w*\b)(\s*)$', bygroups(Name.Function, Text)), - (r'^([-+*/\\~<>=|&!?,@%]+)(\s*)(\w+)(\s*)$', - bygroups(Name.Function, Text, Name.Variable, Text)), - ], - 'blockvariables': [ - include('whitespaces'), - (r'(:)(\s*)(\w+)', - bygroups(Operator, Text, Name.Variable)), - (r'\|', Operator, '#pop'), - default('#pop'), # else pop - ], - 'literals': [ - (r"'(''|[^'])*'", String, 'afterobject'), - (r'\$.', String.Char, 'afterobject'), - (r'#\(', String.Symbol, 'parenth'), - (r'\)', Text, 'afterobject'), - (r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number, 'afterobject'), - ], - '_parenth_helper': [ - include('whitespaces'), - (r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number), - (r'[-+*/\\~<>=|&#!?,@%\w:]+', String.Symbol), - # literals - (r"'(''|[^'])*'", String), - (r'\$.', String.Char), - (r'#*\(', String.Symbol, 'inner_parenth'), - ], - 'parenth': [ - # This state is a bit tricky since - # we can't just pop this state - (r'\)', String.Symbol, ('root', 'afterobject')), - include('_parenth_helper'), - ], - 'inner_parenth': [ - (r'\)', String.Symbol, '#pop'), - include('_parenth_helper'), - ], - 'whitespaces': [ - # skip whitespace and comments - (r'\s+', Text), - (r'"(""|[^"])*"', Comment), - ], - 'objects': [ - (r'\[', Text, 'blockvariables'), - (r'\]', Text, 'afterobject'), - (r'\b(self|super|true|false|nil|thisContext)\b', - Name.Builtin.Pseudo, 'afterobject'), - (r'\b[A-Z]\w*(?!:)\b', Name.Class, 'afterobject'), - (r'\b[a-z]\w*(?!:)\b', Name.Variable, 'afterobject'), - (r'#("(""|[^"])*"|[-+*/\\~<>=|&!?,@%]+|[\w:]+)', - String.Symbol, 'afterobject'), - include('literals'), - ], - 'afterobject': [ - (r'! 
!$', Keyword, '#pop'), # squeak chunk delimiter - include('whitespaces'), - (r'\b(ifTrue:|ifFalse:|whileTrue:|whileFalse:|timesRepeat:)', - Name.Builtin, '#pop'), - (r'\b(new\b(?!:))', Name.Builtin), - (r'\:=|\_', Operator, '#pop'), - (r'\b[a-zA-Z]+\w*:', Name.Function, '#pop'), - (r'\b[a-zA-Z]+\w*', Name.Function), - (r'\w+:?|[-+*/\\~<>=|&!?,@%]+', Name.Function, '#pop'), - (r'\.', Punctuation, '#pop'), - (r';', Punctuation), - (r'[\])}]', Text), - (r'[\[({]', Text, '#pop'), - ], - 'squeak fileout': [ - # Squeak fileout format (optional) - (r'^"(""|[^"])*"!', Keyword), - (r"^'(''|[^'])*'!", Keyword), - (r'^(!)(\w+)( commentStamp: )(.*?)( prior: .*?!\n)(.*?)(!)', - bygroups(Keyword, Name.Class, Keyword, String, Keyword, Text, Keyword)), - (r"^(!)(\w+(?: class)?)( methodsFor: )('(?:''|[^'])*')(.*?!)", - bygroups(Keyword, Name.Class, Keyword, String, Keyword)), - (r'^(\w+)( subclass: )(#\w+)' - r'(\s+instanceVariableNames: )(.*?)' - r'(\s+classVariableNames: )(.*?)' - r'(\s+poolDictionaries: )(.*?)' - r'(\s+category: )(.*?)(!)', - bygroups(Name.Class, Keyword, String.Symbol, Keyword, String, Keyword, - String, Keyword, String, Keyword, String, Keyword)), - (r'^(\w+(?: class)?)(\s+instanceVariableNames: )(.*?)(!)', - bygroups(Name.Class, Keyword, String, Keyword)), - (r'(!\n)(\].*)(! !)$', bygroups(Keyword, Text, Keyword)), - (r'! !$', Keyword), - ], - } - - -class NewspeakLexer(RegexLexer): - """ - For `Newspeak ` syntax. - - .. versionadded:: 1.1 - """ - name = 'Newspeak' - filenames = ['*.ns2'] - aliases = ['newspeak', ] - mimetypes = ['text/x-newspeak'] - - tokens = { - 'root': [ - (r'\b(Newsqueak2)\b', Keyword.Declaration), - (r"'[^']*'", String), - (r'\b(class)(\s+)(\w+)(\s*)', - bygroups(Keyword.Declaration, Text, Name.Class, Text)), - (r'\b(mixin|self|super|private|public|protected|nil|true|false)\b', - Keyword), - (r'(\w+\:)(\s*)([a-zA-Z_]\w+)', - bygroups(Name.Function, Text, Name.Variable)), - (r'(\w+)(\s*)(=)', - bygroups(Name.Attribute, Text, Operator)), - (r'<\w+>', Comment.Special), - include('expressionstat'), - include('whitespace') - ], - - 'expressionstat': [ - (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), - (r'\d+', Number.Integer), - (r':\w+', Name.Variable), - (r'(\w+)(::)', bygroups(Name.Variable, Operator)), - (r'\w+:', Name.Function), - (r'\w+', Name.Variable), - (r'\(|\)', Punctuation), - (r'\[|\]', Punctuation), - (r'\{|\}', Punctuation), - - (r'(\^|\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-|:)', Operator), - (r'\.|;', Punctuation), - include('whitespace'), - include('literals'), - ], - 'literals': [ - (r'\$.', String), - (r"'[^']*'", String), - (r"#'[^']*'", String.Symbol), - (r"#\w+:?", String.Symbol), - (r"#(\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-)+", String.Symbol) - ], - 'whitespace': [ - (r'\s+', Text), - (r'"[^"]*"', Comment) - ], - } +# -*- coding: utf-8 -*- +""" + pygments.lexers.smalltalk + ~~~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for Smalltalk and related languages. + + :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer, include, bygroups, default +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +__all__ = ['SmalltalkLexer', 'NewspeakLexer'] + + +class SmalltalkLexer(RegexLexer): + """ + For `Smalltalk `_ syntax. + Contributed by Stefan Matthias Aust. + Rewritten by Nils Winter. + + .. 
versionadded:: 0.10 + """ + name = 'Smalltalk' + filenames = ['*.st'] + aliases = ['smalltalk', 'squeak', 'st'] + mimetypes = ['text/x-smalltalk'] + + tokens = { + 'root': [ + (r'(<)(\w+:)(.*?)(>)', bygroups(Text, Keyword, Text, Text)), + include('squeak fileout'), + include('whitespaces'), + include('method definition'), + (r'(\|)([\w\s]*)(\|)', bygroups(Operator, Name.Variable, Operator)), + include('objects'), + (r'\^|\:=|\_', Operator), + # temporaries + (r'[\]({}.;!]', Text), + ], + 'method definition': [ + # Not perfect can't allow whitespaces at the beginning and the + # without breaking everything + (r'([a-zA-Z]+\w*:)(\s*)(\w+)', + bygroups(Name.Function, Text, Name.Variable)), + (r'^(\b[a-zA-Z]+\w*\b)(\s*)$', bygroups(Name.Function, Text)), + (r'^([-+*/\\~<>=|&!?,@%]+)(\s*)(\w+)(\s*)$', + bygroups(Name.Function, Text, Name.Variable, Text)), + ], + 'blockvariables': [ + include('whitespaces'), + (r'(:)(\s*)(\w+)', + bygroups(Operator, Text, Name.Variable)), + (r'\|', Operator, '#pop'), + default('#pop'), # else pop + ], + 'literals': [ + (r"'(''|[^'])*'", String, 'afterobject'), + (r'\$.', String.Char, 'afterobject'), + (r'#\(', String.Symbol, 'parenth'), + (r'\)', Text, 'afterobject'), + (r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number, 'afterobject'), + ], + '_parenth_helper': [ + include('whitespaces'), + (r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number), + (r'[-+*/\\~<>=|&#!?,@%\w:]+', String.Symbol), + # literals + (r"'(''|[^'])*'", String), + (r'\$.', String.Char), + (r'#*\(', String.Symbol, 'inner_parenth'), + ], + 'parenth': [ + # This state is a bit tricky since + # we can't just pop this state + (r'\)', String.Symbol, ('root', 'afterobject')), + include('_parenth_helper'), + ], + 'inner_parenth': [ + (r'\)', String.Symbol, '#pop'), + include('_parenth_helper'), + ], + 'whitespaces': [ + # skip whitespace and comments + (r'\s+', Text), + (r'"(""|[^"])*"', Comment), + ], + 'objects': [ + (r'\[', Text, 'blockvariables'), + (r'\]', Text, 'afterobject'), + (r'\b(self|super|true|false|nil|thisContext)\b', + Name.Builtin.Pseudo, 'afterobject'), + (r'\b[A-Z]\w*(?!:)\b', Name.Class, 'afterobject'), + (r'\b[a-z]\w*(?!:)\b', Name.Variable, 'afterobject'), + (r'#("(""|[^"])*"|[-+*/\\~<>=|&!?,@%]+|[\w:]+)', + String.Symbol, 'afterobject'), + include('literals'), + ], + 'afterobject': [ + (r'! 
!$', Keyword, '#pop'), # squeak chunk delimiter + include('whitespaces'), + (r'\b(ifTrue:|ifFalse:|whileTrue:|whileFalse:|timesRepeat:)', + Name.Builtin, '#pop'), + (r'\b(new\b(?!:))', Name.Builtin), + (r'\:=|\_', Operator, '#pop'), + (r'\b[a-zA-Z]+\w*:', Name.Function, '#pop'), + (r'\b[a-zA-Z]+\w*', Name.Function), + (r'\w+:?|[-+*/\\~<>=|&!?,@%]+', Name.Function, '#pop'), + (r'\.', Punctuation, '#pop'), + (r';', Punctuation), + (r'[\])}]', Text), + (r'[\[({]', Text, '#pop'), + ], + 'squeak fileout': [ + # Squeak fileout format (optional) + (r'^"(""|[^"])*"!', Keyword), + (r"^'(''|[^'])*'!", Keyword), + (r'^(!)(\w+)( commentStamp: )(.*?)( prior: .*?!\n)(.*?)(!)', + bygroups(Keyword, Name.Class, Keyword, String, Keyword, Text, Keyword)), + (r"^(!)(\w+(?: class)?)( methodsFor: )('(?:''|[^'])*')(.*?!)", + bygroups(Keyword, Name.Class, Keyword, String, Keyword)), + (r'^(\w+)( subclass: )(#\w+)' + r'(\s+instanceVariableNames: )(.*?)' + r'(\s+classVariableNames: )(.*?)' + r'(\s+poolDictionaries: )(.*?)' + r'(\s+category: )(.*?)(!)', + bygroups(Name.Class, Keyword, String.Symbol, Keyword, String, Keyword, + String, Keyword, String, Keyword, String, Keyword)), + (r'^(\w+(?: class)?)(\s+instanceVariableNames: )(.*?)(!)', + bygroups(Name.Class, Keyword, String, Keyword)), + (r'(!\n)(\].*)(! !)$', bygroups(Keyword, Text, Keyword)), + (r'! !$', Keyword), + ], + } + + +class NewspeakLexer(RegexLexer): + """ + For `Newspeak `_ syntax. + + .. versionadded:: 1.1 + """ + name = 'Newspeak' + filenames = ['*.ns2'] + aliases = ['newspeak', ] + mimetypes = ['text/x-newspeak'] + + tokens = { + 'root': [ + (r'\b(Newsqueak2)\b', Keyword.Declaration), + (r"'[^']*'", String), + (r'\b(class)(\s+)(\w+)(\s*)', + bygroups(Keyword.Declaration, Text, Name.Class, Text)), + (r'\b(mixin|self|super|private|public|protected|nil|true|false)\b', + Keyword), + (r'(\w+\:)(\s*)([a-zA-Z_]\w+)', + bygroups(Name.Function, Text, Name.Variable)), + (r'(\w+)(\s*)(=)', + bygroups(Name.Attribute, Text, Operator)), + (r'<\w+>', Comment.Special), + include('expressionstat'), + include('whitespace') + ], + + 'expressionstat': [ + (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), + (r'\d+', Number.Integer), + (r':\w+', Name.Variable), + (r'(\w+)(::)', bygroups(Name.Variable, Operator)), + (r'\w+:', Name.Function), + (r'\w+', Name.Variable), + (r'\(|\)', Punctuation), + (r'\[|\]', Punctuation), + (r'\{|\}', Punctuation), + + (r'(\^|\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-|:)', Operator), + (r'\.|;', Punctuation), + include('whitespace'), + include('literals'), + ], + 'literals': [ + (r'\$.', String), + (r"'[^']*'", String), + (r"#'[^']*'", String.Symbol), + (r"#\w+:?", String.Symbol), + (r"#(\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-)+", String.Symbol) + ], + 'whitespace': [ + (r'\s+', Text), + (r'"[^"]*"', Comment) + ], + } diff --git a/pygments/lexers/smv.py b/pygments/lexers/smv.py old mode 100644 new mode 100755 index 6dac4af..f21d709 --- a/pygments/lexers/smv.py +++ b/pygments/lexers/smv.py @@ -1,79 +1,79 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.smv - ~~~~~~~~~~~~~~~~~~~ - - Lexers for the SMV languages. - - :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -from pygments.lexer import RegexLexer, words -from pygments.token import Comment, Keyword, Name, Number, Operator, \ - Punctuation, Text - -__all__ = ['NuSMVLexer'] - - -class NuSMVLexer(RegexLexer): - """ - Lexer for the NuSMV language. - - .. 
versionadded:: 2.2 - """ - - name = 'NuSMV' - aliases = ['nusmv'] - filenames = ['*.smv'] - mimetypes = [] - - tokens = { - 'root': [ - # Comments - (r'(?s)\/\-\-.*?\-\-/', Comment), - (r'--.*\n', Comment), - - # Reserved - (words(('MODULE', 'DEFINE', 'MDEFINE', 'CONSTANTS', 'VAR', 'IVAR', - 'FROZENVAR', 'INIT', 'TRANS', 'INVAR', 'SPEC', 'CTLSPEC', - 'LTLSPEC', 'PSLSPEC', 'COMPUTE', 'NAME', 'INVARSPEC', - 'FAIRNESS', 'JUSTICE', 'COMPASSION', 'ISA', 'ASSIGN', - 'CONSTRAINT', 'SIMPWFF', 'CTLWFF', 'LTLWFF', 'PSLWFF', - 'COMPWFF', 'IN', 'MIN', 'MAX', 'MIRROR', 'PRED', - 'PREDICATES'), suffix=r'(?![\w$#-])'), - Keyword.Declaration), - (r'process(?![\w$#-])', Keyword), - (words(('array', 'of', 'boolean', 'integer', 'real', 'word'), - suffix=r'(?![\w$#-])'), Keyword.Type), - (words(('case', 'esac'), suffix=r'(?![\w$#-])'), Keyword), - (words(('word1', 'bool', 'signed', 'unsigned', 'extend', 'resize', - 'sizeof', 'uwconst', 'swconst', 'init', 'self', 'count', - 'abs', 'max', 'min'), suffix=r'(?![\w$#-])'), - Name.Builtin), - (words(('EX', 'AX', 'EF', 'AF', 'EG', 'AG', 'E', 'F', 'O', 'G', - 'H', 'X', 'Y', 'Z', 'A', 'U', 'S', 'V', 'T', 'BU', 'EBF', - 'ABF', 'EBG', 'ABG', 'next', 'mod', 'union', 'in', 'xor', - 'xnor'), suffix=r'(?![\w$#-])'), - Operator.Word), - (words(('TRUE', 'FALSE'), suffix=r'(?![\w$#-])'), Keyword.Constant), - - # Names - (r'[a-zA-Z_][\w$#-]*', Name.Variable), - - # Operators - (r':=', Operator), - (r'[-&|+*/<>!=]', Operator), - - # Literals - (r'\-?\d+\b', Number.Integer), - (r'0[su][bB]\d*_[01_]+', Number.Bin), - (r'0[su][oO]\d*_[0-7_]+', Number.Oct), - (r'0[su][dD]\d*_[\d_]+', Number.Dec), - (r'0[su][hH]\d*_[\da-fA-F_]+', Number.Hex), - - # Whitespace, punctuation and the rest - (r'\s+', Text.Whitespace), - (r'[()\[\]{};?:.,]', Punctuation), - ], - } +# -*- coding: utf-8 -*- +""" + pygments.lexers.smv + ~~~~~~~~~~~~~~~~~~~ + + Lexers for the SMV languages. + + :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer, words +from pygments.token import Comment, Keyword, Name, Number, Operator, \ + Punctuation, Text + +__all__ = ['NuSMVLexer'] + + +class NuSMVLexer(RegexLexer): + """ + Lexer for the NuSMV language. + + .. 
versionadded:: 2.2 + """ + + name = 'NuSMV' + aliases = ['nusmv'] + filenames = ['*.smv'] + mimetypes = [] + + tokens = { + 'root': [ + # Comments + (r'(?s)\/\-\-.*?\-\-/', Comment), + (r'--.*\n', Comment), + + # Reserved + (words(('MODULE', 'DEFINE', 'MDEFINE', 'CONSTANTS', 'VAR', 'IVAR', + 'FROZENVAR', 'INIT', 'TRANS', 'INVAR', 'SPEC', 'CTLSPEC', + 'LTLSPEC', 'PSLSPEC', 'COMPUTE', 'NAME', 'INVARSPEC', + 'FAIRNESS', 'JUSTICE', 'COMPASSION', 'ISA', 'ASSIGN', + 'CONSTRAINT', 'SIMPWFF', 'CTLWFF', 'LTLWFF', 'PSLWFF', + 'COMPWFF', 'IN', 'MIN', 'MAX', 'MIRROR', 'PRED', + 'PREDICATES'), suffix=r'(?![\w$#-])'), + Keyword.Declaration), + (r'process(?![\w$#-])', Keyword), + (words(('array', 'of', 'boolean', 'integer', 'real', 'word'), + suffix=r'(?![\w$#-])'), Keyword.Type), + (words(('case', 'esac'), suffix=r'(?![\w$#-])'), Keyword), + (words(('word1', 'bool', 'signed', 'unsigned', 'extend', 'resize', + 'sizeof', 'uwconst', 'swconst', 'init', 'self', 'count', + 'abs', 'max', 'min'), suffix=r'(?![\w$#-])'), + Name.Builtin), + (words(('EX', 'AX', 'EF', 'AF', 'EG', 'AG', 'E', 'F', 'O', 'G', + 'H', 'X', 'Y', 'Z', 'A', 'U', 'S', 'V', 'T', 'BU', 'EBF', + 'ABF', 'EBG', 'ABG', 'next', 'mod', 'union', 'in', 'xor', + 'xnor'), suffix=r'(?![\w$#-])'), + Operator.Word), + (words(('TRUE', 'FALSE'), suffix=r'(?![\w$#-])'), Keyword.Constant), + + # Names + (r'[a-zA-Z_][\w$#-]*', Name.Variable), + + # Operators + (r':=', Operator), + (r'[-&|+*/<>!=]', Operator), + + # Literals + (r'\-?\d+\b', Number.Integer), + (r'0[su][bB]\d*_[01_]+', Number.Bin), + (r'0[su][oO]\d*_[0-7_]+', Number.Oct), + (r'0[su][dD]\d*_[\d_]+', Number.Dec), + (r'0[su][hH]\d*_[\da-fA-F_]+', Number.Hex), + + # Whitespace, punctuation and the rest + (r'\s+', Text.Whitespace), + (r'[()\[\]{};?:.,]', Punctuation), + ], + } diff --git a/pygments/lexers/snobol.py b/pygments/lexers/snobol.py old mode 100644 new mode 100755 index ce52f7c..26c29bb --- a/pygments/lexers/snobol.py +++ b/pygments/lexers/snobol.py @@ -1,83 +1,83 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.snobol - ~~~~~~~~~~~~~~~~~~~~~~ - - Lexers for the SNOBOL language. - - :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -from pygments.lexer import RegexLexer, bygroups -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ - Number, Punctuation - -__all__ = ['SnobolLexer'] - - -class SnobolLexer(RegexLexer): - """ - Lexer for the SNOBOL4 programming language. - - Recognizes the common ASCII equivalents of the original SNOBOL4 operators. - Does not require spaces around binary operators. - - .. versionadded:: 1.5 - """ - - name = "Snobol" - aliases = ["snobol"] - filenames = ['*.snobol'] - mimetypes = ['text/x-snobol'] - - tokens = { - # root state, start of line - # comments, continuation lines, and directives start in column 1 - # as do labels - 'root': [ - (r'\*.*\n', Comment), - (r'[+.] 
', Punctuation, 'statement'), - (r'-.*\n', Comment), - (r'END\s*\n', Name.Label, 'heredoc'), - (r'[A-Za-z$][\w$]*', Name.Label, 'statement'), - (r'\s+', Text, 'statement'), - ], - # statement state, line after continuation or label - 'statement': [ - (r'\s*\n', Text, '#pop'), - (r'\s+', Text), - (r'(?<=[^\w.])(LT|LE|EQ|NE|GE|GT|INTEGER|IDENT|DIFFER|LGT|SIZE|' - r'REPLACE|TRIM|DUPL|REMDR|DATE|TIME|EVAL|APPLY|OPSYN|LOAD|UNLOAD|' - r'LEN|SPAN|BREAK|ANY|NOTANY|TAB|RTAB|REM|POS|RPOS|FAIL|FENCE|' - r'ABORT|ARB|ARBNO|BAL|SUCCEED|INPUT|OUTPUT|TERMINAL)(?=[^\w.])', - Name.Builtin), - (r'[A-Za-z][\w.]*', Name), - # ASCII equivalents of original operators - # | for the EBCDIC equivalent, ! likewise - # \ for EBCDIC negation - (r'\*\*|[?$.!%*/#+\-@|&\\=]', Operator), - (r'"[^"]*"', String), - (r"'[^']*'", String), - # Accept SPITBOL syntax for real numbers - # as well as Macro SNOBOL4 - (r'[0-9]+(?=[^.EeDd])', Number.Integer), - (r'[0-9]+(\.[0-9]*)?([EDed][-+]?[0-9]+)?', Number.Float), - # Goto - (r':', Punctuation, 'goto'), - (r'[()<>,;]', Punctuation), - ], - # Goto block - 'goto': [ - (r'\s*\n', Text, "#pop:2"), - (r'\s+', Text), - (r'F|S', Keyword), - (r'(\()([A-Za-z][\w.]*)(\))', - bygroups(Punctuation, Name.Label, Punctuation)) - ], - # everything after the END statement is basically one - # big heredoc. - 'heredoc': [ - (r'.*\n', String.Heredoc) - ] - } +# -*- coding: utf-8 -*- +""" + pygments.lexers.snobol + ~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for the SNOBOL language. + + :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pygments.lexer import RegexLexer, bygroups +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation + +__all__ = ['SnobolLexer'] + + +class SnobolLexer(RegexLexer): + """ + Lexer for the SNOBOL4 programming language. + + Recognizes the common ASCII equivalents of the original SNOBOL4 operators. + Does not require spaces around binary operators. + + .. versionadded:: 1.5 + """ + + name = "Snobol" + aliases = ["snobol"] + filenames = ['*.snobol'] + mimetypes = ['text/x-snobol'] + + tokens = { + # root state, start of line + # comments, continuation lines, and directives start in column 1 + # as do labels + 'root': [ + (r'\*.*\n', Comment), + (r'[+.] ', Punctuation, 'statement'), + (r'-.*\n', Comment), + (r'END\s*\n', Name.Label, 'heredoc'), + (r'[A-Za-z$][\w$]*', Name.Label, 'statement'), + (r'\s+', Text, 'statement'), + ], + # statement state, line after continuation or label + 'statement': [ + (r'\s*\n', Text, '#pop'), + (r'\s+', Text), + (r'(?<=[^\w.])(LT|LE|EQ|NE|GE|GT|INTEGER|IDENT|DIFFER|LGT|SIZE|' + r'REPLACE|TRIM|DUPL|REMDR|DATE|TIME|EVAL|APPLY|OPSYN|LOAD|UNLOAD|' + r'LEN|SPAN|BREAK|ANY|NOTANY|TAB|RTAB|REM|POS|RPOS|FAIL|FENCE|' + r'ABORT|ARB|ARBNO|BAL|SUCCEED|INPUT|OUTPUT|TERMINAL)(?=[^\w.])', + Name.Builtin), + (r'[A-Za-z][\w.]*', Name), + # ASCII equivalents of original operators + # | for the EBCDIC equivalent, ! 
likewise + # \ for EBCDIC negation + (r'\*\*|[?$.!%*/#+\-@|&\\=]', Operator), + (r'"[^"]*"', String), + (r"'[^']*'", String), + # Accept SPITBOL syntax for real numbers + # as well as Macro SNOBOL4 + (r'[0-9]+(?=[^.EeDd])', Number.Integer), + (r'[0-9]+(\.[0-9]*)?([EDed][-+]?[0-9]+)?', Number.Float), + # Goto + (r':', Punctuation, 'goto'), + (r'[()<>,;]', Punctuation), + ], + # Goto block + 'goto': [ + (r'\s*\n', Text, "#pop:2"), + (r'\s+', Text), + (r'F|S', Keyword), + (r'(\()([A-Za-z][\w.]*)(\))', + bygroups(Punctuation, Name.Label, Punctuation)) + ], + # everything after the END statement is basically one + # big heredoc. + 'heredoc': [ + (r'.*\n', String.Heredoc) + ] + } diff --git a/pygments/lexers/solidity.py b/pygments/lexers/solidity.py old mode 100644 new mode 100755 index 9966837..5d71f69 --- a/pygments/lexers/solidity.py +++ b/pygments/lexers/solidity.py @@ -1,93 +1,92 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.solidity - ~~~~~~~~~~~~~~~~~~~~~~~~ - - Lexers for Solidity. - - :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -import re - -from pygments.lexer import RegexLexer, bygroups, include, words -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ - Number, Punctuation - -__all__ = ['SolidityLexer'] - - -class SolidityLexer(RegexLexer): - """ - For Solidity source code. - - .. versionadded:: 2.5 - """ - - name = 'Solidity' - aliases = ['solidity'] - filenames = ['*.sol'] - mimetypes = [] - - flags = re.MULTILINE | re.UNICODE - - datatype = ( - r'\b(address|bool|((bytes|hash|int|string|uint)(8|16|24|32|40|48|56|64' - r'|72|80|88|96|104|112|120|128|136|144|152|160|168|176|184|192|200|208' - r'|216|224|232|240|248|256)?))\b' - ) - - tokens = { - 'root': [ - include('whitespace'), - include('comments'), - (r'\bpragma\s+solidity\b', Keyword, 'pragma'), - (r'\b(contract)(\s+)([a-zA-Z_]\w*)', - bygroups(Keyword, Text.WhiteSpace, Name.Entity)), - (datatype + r'(\s+)((external|public|internal|private)\s+)?' 
+ - r'([a-zA-Z_]\w*)', - bygroups(Keyword.Type, None, None, None, Text.WhiteSpace, Keyword, - None, Name.Variable)), - (r'\b(enum|event|function|struct)(\s+)([a-zA-Z_]\w*)', - bygroups(Keyword.Type, Text.WhiteSpace, Name.Variable)), - (r'\b(msg|block|tx)\.([A-Za-z_][A-Za-z0-9_]*)\b', Keyword), - (words(( - 'block', 'break', 'constant', 'constructor', 'continue', - 'contract', 'do', 'else', 'external', 'false', 'for', - 'function', 'if', 'import', 'inherited', 'internal', 'is', - 'library', 'mapping', 'memory', 'modifier', 'msg', 'new', - 'payable', 'private', 'public', 'require', 'return', - 'returns', 'struct', 'suicide', 'throw', 'this', 'true', - 'tx', 'var', 'while'), prefix=r'\b', suffix=r'\b'), - Keyword.Type), - (words(('keccak256',), prefix=r'\b', suffix=r'\b'), Name.Builtin), - (datatype, Keyword.Type), - include('constants'), - (r'[a-zA-Z_]\w*', Text), - (r'[!<=>+*/-]', Operator), - (r'[.;:{}(),\[\]]', Punctuation) - ], - 'comments': [ - (r'//(\n|[\w\W]*?[^\\]\n)', Comment.Single), - (r'/(\\\n)?[*][\w\W]*?[*](\\\n)?/', Comment.Multiline), - (r'/(\\\n)?[*][\w\W]*', Comment.Multiline) - ], - 'constants': [ - (r'("([\\]"|.)*?")', String.Double), - (r"('([\\]'|.)*?')", String.Single), - (r'\b0[xX][0-9a-fA-F]+\b', Number.Hex), - (r'\b\d+\b', Number.Decimal), - ], - 'pragma': [ - include('whitespace'), - include('comments'), - (r'(\^|>=|<)(\s*)(\d+\.\d+\.\d+)', - bygroups(Operator, Text.WhiteSpace, Keyword)), - (r';', Punctuation, '#pop') - ], - 'whitespace': [ - (r'\s+', Text.WhiteSpace), - (r'\n', Text.WhiteSpace) - ] - } +# -*- coding: utf-8 -*- +""" + pygments.lexers.solidity + ~~~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for Solidity. + + :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pygments.lexer import RegexLexer, bygroups, include, words +from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Whitespace + +__all__ = ['SolidityLexer'] + + +class SolidityLexer(RegexLexer): + """ + For Solidity source code. + + .. versionadded:: 2.5 + """ + + name = 'Solidity' + aliases = ['solidity'] + filenames = ['*.sol'] + mimetypes = [] + + flags = re.MULTILINE | re.UNICODE + + datatype = ( + r'\b(address|bool|(?:(?:bytes|hash|int|string|uint)(?:8|16|24|32|40|48|56|64' + r'|72|80|88|96|104|112|120|128|136|144|152|160|168|176|184|192|200|208' + r'|216|224|232|240|248|256)?))\b' + ) + + tokens = { + 'root': [ + include('whitespace'), + include('comments'), + (r'\bpragma\s+solidity\b', Keyword, 'pragma'), + (r'\b(contract)(\s+)([a-zA-Z_]\w*)', + bygroups(Keyword, Whitespace, Name.Entity)), + (datatype + r'(\s+)((?:external|public|internal|private)\s+)?' 
+ + r'([a-zA-Z_]\w*)', + bygroups(Keyword.Type, Whitespace, Keyword, Name.Variable)), + (r'\b(enum|event|function|struct)(\s+)([a-zA-Z_]\w*)', + bygroups(Keyword.Type, Whitespace, Name.Variable)), + (r'\b(msg|block|tx)\.([A-Za-z_][a-zA-Z0-9_]*)\b', Keyword), + (words(( + 'block', 'break', 'constant', 'constructor', 'continue', + 'contract', 'do', 'else', 'external', 'false', 'for', + 'function', 'if', 'import', 'inherited', 'internal', 'is', + 'library', 'mapping', 'memory', 'modifier', 'msg', 'new', + 'payable', 'private', 'public', 'require', 'return', + 'returns', 'struct', 'suicide', 'throw', 'this', 'true', + 'tx', 'var', 'while'), prefix=r'\b', suffix=r'\b'), + Keyword.Type), + (words(('keccak256',), prefix=r'\b', suffix=r'\b'), Name.Builtin), + (datatype, Keyword.Type), + include('constants'), + (r'[a-zA-Z_]\w*', Text), + (r'[!<=>+*/-]', Operator), + (r'[.;:{}(),\[\]]', Punctuation) + ], + 'comments': [ + (r'//(\n|[\w\W]*?[^\\]\n)', Comment.Single), + (r'/(\\\n)?[*][\w\W]*?[*](\\\n)?/', Comment.Multiline), + (r'/(\\\n)?[*][\w\W]*', Comment.Multiline) + ], + 'constants': [ + (r'("(\\"|.)*?")', String.Double), + (r"('(\\'|.)*?')", String.Single), + (r'\b0[xX][0-9a-fA-F]+\b', Number.Hex), + (r'\b\d+\b', Number.Decimal), + ], + 'pragma': [ + include('whitespace'), + include('comments'), + (r'(\^|>=|<)(\s*)(\d+\.\d+\.\d+)', + bygroups(Operator, Whitespace, Keyword)), + (r';', Punctuation, '#pop') + ], + 'whitespace': [ + (r'\s+', Whitespace), + (r'\n', Whitespace) + ] + } diff --git a/pygments/lexers/special.py b/pygments/lexers/special.py old mode 100644 new mode 100755 index e97f194..28a3dbb --- a/pygments/lexers/special.py +++ b/pygments/lexers/special.py @@ -1,105 +1,105 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.special - ~~~~~~~~~~~~~~~~~~~~~~~ - - Special lexers. - - :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -import re -from io import BytesIO - -from pygments.lexer import Lexer -from pygments.token import Token, Error, Text -from pygments.util import get_choice_opt - - -__all__ = ['TextLexer', 'RawTokenLexer'] - - -class TextLexer(Lexer): - """ - "Null" lexer, doesn't highlight anything. - """ - name = 'Text only' - aliases = ['text'] - filenames = ['*.txt'] - mimetypes = ['text/plain'] - priority = 0.01 - - def get_tokens_unprocessed(self, text): - yield 0, Text, text - - def analyse_text(text): - return TextLexer.priority - - -_ttype_cache = {} - -line_re = re.compile(b'.*?\n') - - -class RawTokenLexer(Lexer): - """ - Recreate a token stream formatted with the `RawTokenFormatter`. This - lexer raises exceptions during parsing if the token stream in the - file is malformed. - - Additional options accepted: - - `compress` - If set to ``"gz"`` or ``"bz2"``, decompress the token stream with - the given compression algorithm before lexing (default: ``""``). 
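The special.py hunk likewise changes only its copyright year. The RawTokenLexer being re-imported here is the read side of RawTokenFormatter: the formatter writes one "<tokentype>\t<repr(value)>" line per token, and this lexer parses such lines back into a token stream (optionally gunzipping them first, per the compress option above). A sketch of the write side (standard Pygments API; the input snippet is illustrative):

    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import RawTokenFormatter

    # RawTokenFormatter emits bytes, one tab-separated token per line
    raw = highlight('print(42)\n', PythonLexer(), RawTokenFormatter())
    print(raw.decode('ascii'))  # lines like: Token.Name.Builtin\t'print'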
- """ - name = 'Raw token data' - aliases = ['raw'] - filenames = [] - mimetypes = ['application/x-pygments-tokens'] - - def __init__(self, **options): - self.compress = get_choice_opt(options, 'compress', - ['', 'none', 'gz', 'bz2'], '') - Lexer.__init__(self, **options) - - def get_tokens(self, text): - if isinstance(text, str): - # raw token stream never has any non-ASCII characters - text = text.encode('ascii') - if self.compress == 'gz': - import gzip - gzipfile = gzip.GzipFile('', 'rb', 9, BytesIO(text)) - text = gzipfile.read() - elif self.compress == 'bz2': - import bz2 - text = bz2.decompress(text) - - # do not call Lexer.get_tokens() because we do not want Unicode - # decoding to occur, and stripping is not optional. - text = text.strip(b'\n') + b'\n' - for i, t, v in self.get_tokens_unprocessed(text): - yield t, v - - def get_tokens_unprocessed(self, text): - length = 0 - for match in line_re.finditer(text): - try: - ttypestr, val = match.group().split(b'\t', 1) - except ValueError: - val = match.group().decode('ascii', 'replace') - ttype = Error - else: - ttype = _ttype_cache.get(ttypestr) - if not ttype: - ttype = Token - ttypes = ttypestr.split('.')[1:] - for ttype_ in ttypes: - if not ttype_ or not ttype_[0].isupper(): - raise ValueError('malformed token name') - ttype = getattr(ttype, ttype_) - _ttype_cache[ttypestr] = ttype - val = val[2:-2].decode('unicode-escape') - yield length, ttype, val - length += len(val) +# -*- coding: utf-8 -*- +""" + pygments.lexers.special + ~~~~~~~~~~~~~~~~~~~~~~~ + + Special lexers. + + :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re +from io import BytesIO + +from pygments.lexer import Lexer +from pygments.token import Token, Error, Text +from pygments.util import get_choice_opt + + +__all__ = ['TextLexer', 'RawTokenLexer'] + + +class TextLexer(Lexer): + """ + "Null" lexer, doesn't highlight anything. + """ + name = 'Text only' + aliases = ['text'] + filenames = ['*.txt'] + mimetypes = ['text/plain'] + priority = 0.01 + + def get_tokens_unprocessed(self, text): + yield 0, Text, text + + def analyse_text(text): + return TextLexer.priority + + +_ttype_cache = {} + +line_re = re.compile(b'.*?\n') + + +class RawTokenLexer(Lexer): + """ + Recreate a token stream formatted with the `RawTokenFormatter`. This + lexer raises exceptions during parsing if the token stream in the + file is malformed. + + Additional options accepted: + + `compress` + If set to ``"gz"`` or ``"bz2"``, decompress the token stream with + the given compression algorithm before lexing (default: ``""``). + """ + name = 'Raw token data' + aliases = ['raw'] + filenames = [] + mimetypes = ['application/x-pygments-tokens'] + + def __init__(self, **options): + self.compress = get_choice_opt(options, 'compress', + ['', 'none', 'gz', 'bz2'], '') + Lexer.__init__(self, **options) + + def get_tokens(self, text): + if isinstance(text, str): + # raw token stream never has any non-ASCII characters + text = text.encode('ascii') + if self.compress == 'gz': + import gzip + gzipfile = gzip.GzipFile('', 'rb', 9, BytesIO(text)) + text = gzipfile.read() + elif self.compress == 'bz2': + import bz2 + text = bz2.decompress(text) + + # do not call Lexer.get_tokens() because we do not want Unicode + # decoding to occur, and stripping is not optional. 
+ text = text.strip(b'\n') + b'\n' + for i, t, v in self.get_tokens_unprocessed(text): + yield t, v + + def get_tokens_unprocessed(self, text): + length = 0 + for match in line_re.finditer(text): + try: + ttypestr, val = match.group().split(b'\t', 1) + except ValueError: + val = match.group().decode('ascii', 'replace') + ttype = Error + else: + ttype = _ttype_cache.get(ttypestr) + if not ttype: + ttype = Token + ttypes = ttypestr.split('.')[1:] + for ttype_ in ttypes: + if not ttype_ or not ttype_[0].isupper(): + raise ValueError('malformed token name') + ttype = getattr(ttype, ttype_) + _ttype_cache[ttypestr] = ttype + val = val[2:-2].decode('unicode-escape') + yield length, ttype, val + length += len(val) diff --git a/pygments/lexers/sql.py b/pygments/lexers/sql.py old mode 100644 new mode 100755 index da7c986..7bc673b --- a/pygments/lexers/sql.py +++ b/pygments/lexers/sql.py @@ -1,744 +1,837 @@ -# -*- coding: utf-8 -*- -""" - pygments.lexers.sql - ~~~~~~~~~~~~~~~~~~~ - - Lexers for various SQL dialects and related interactive sessions. - - Postgres specific lexers: - - `PostgresLexer` - A SQL lexer for the PostgreSQL dialect. Differences w.r.t. the SQL - lexer are: - - - keywords and data types list parsed from the PG docs (run the - `_postgres_builtins` module to update them); - - Content of $-strings parsed using a specific lexer, e.g. the content - of a PL/Python function is parsed using the Python lexer; - - parse PG specific constructs: E-strings, $-strings, U&-strings, - different operators and punctuation. - - `PlPgsqlLexer` - A lexer for the PL/pgSQL language. Adds a few specific construct on - top of the PG SQL lexer (such as <